| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy; submodules are imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
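

# A minimal, self-contained sketch of the lazy-import idea used above (illustrative
# only; the real _LazyModule also handles module specs and TYPE_CHECKING re-exports):
import importlib


class _LazyAttrSketch:
    """Resolve attributes on first access instead of importing every submodule eagerly."""

    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_module = {attr: sub for sub, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        submodule = importlib.import_module(f"{self._package}.{self._attr_to_module[attr]}")
        return getattr(submodule, attr)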
| 658 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, H(n) = n * (2 * n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
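    # Sanity check of the closed form H(n) = n * (2 * n - 1), e.g. H(4) = 4 * 7 = 28:
    assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]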
| 658 | 1 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False  # attribute name reconstructed from an obfuscated boolean flag

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 658 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    """
    Translation tool built on an NLLB-200 checkpoint; language names are mapped to
    NLLB codes via LANGUAGE_CODES above.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
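

# Minimal usage sketch (the tool is callable; encode/forward/decode run under the
# hood, and the checkpoint is downloaded on first use):
#
#   translator = TranslationTool()
#   translator("Bonjour, le monde !", src_lang="French", tgt_lang="English")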
| 658 | 1 |
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """
    Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
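

# Minimal usage sketch (checkpoint id is illustrative; pass a PIL image or a
# batch of images together with the text):
#
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")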
| 658 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; returns the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted array; returns the index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted array; returns the index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 658 | 1 |
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after `number_propagations` forward propagations."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
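    # Usage sketch: with a generous iteration budget the result should land near
    # the expected value (450_000 iterations is an illustrative choice):
    res = forward_propagation(32, 450_000)
    print(f"forward_propagation(32, 450_000) -> {res:.2f} (expected ~32)")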
| 658 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: max revenue obtainable from a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
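    # Worked instance of the recurrence r(n) = max over 1<=i<=n of (prices[i-1] + r(n-i)):
    # with prices = [1, 5, 8, 9], r(4) = max(1 + 8, 5 + 5, 8 + 1, 9 + 0) = 10.
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10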
| 658 | 1 |
from math import sqrt


def is_prime(number: int) -> bool:
    """Determine whether `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
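    # Small-n sanity checks (primes: 2, 3, 5, 7, 11, 13, ...):
    assert solution(6) == 13
    assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13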
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """
    Configuration class to store the configuration of a FocalNet model.
    """

    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
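

# Minimal usage sketch reading a derived attribute (values follow from the
# defaults above: depths=[2, 2, 6, 2] gives four stages plus the stem):
#
#   config = FocalNetConfig()
#   config.stage_names  # ["stem", "stage1", "stage2", "stage3", "stage4"]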
| 658 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    """Convert a pixel height/width into the latent height/width, rounding up to cover the whole image."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
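

# Worked example with the default scale_factor=8: the pixel size is divided by
# scale_factor**2 = 64 (rounding up) and then multiplied back by scale_factor:
#   downscale_height_and_width(512, 512) -> (64, 64)
#   downscale_height_and_width(520, 512) -> (72, 64)   # 520 rounds up to the next block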
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 with ControlNet conditioning.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 658 |
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a SwitchTransformers model.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
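

# Minimal usage sketch: the sparse/dense interleave is derived from the counts
# above; with the defaults (num_layers=12, num_sparse_encoder_layers=3) every
# 4th encoder layer is sparse:
#
#   config = SwitchTransformersConfig()
#   config.encoder_sparse_step  # 4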
| 658 |
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
for key in orig_state_dict.copy().keys():
UpperCamelCase :Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
UpperCamelCase :Any = key.split('''.''' )
UpperCamelCase :str = int(key_split[3] )
UpperCamelCase :Union[str, Any] = config.hidden_size
if "weight" in key:
UpperCamelCase :List[str] = val[:dim, :]
UpperCamelCase :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase :Optional[Any] = val[-dim:, :]
else:
UpperCamelCase :Dict = val[:dim]
UpperCamelCase :Optional[int] = val[dim : dim * 2]
UpperCamelCase :List[Any] = val[-dim:]
else:
UpperCamelCase :Union[str, Any] = val
return orig_state_dict
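# Illustrative shape note (not part of the original file): a fused
# "blocks.N.attn.qkv.weight" of shape (3 * hidden_size, hidden_size) is sliced
# into three (hidden_size, hidden_size) matrices for the query, key and value
# projections; the matching biases are split the same way.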
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[str] = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=False ):
UpperCamelCase :Optional[Any] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
UpperCamelCase :Optional[int] = model_name_to_url[model_name]
UpperCamelCase :Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE__ )
# rename some keys
UpperCamelCase :Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load 🤗 model
UpperCamelCase :int = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
UpperCamelCase :Union[str, Any] = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
UpperCamelCase :List[str] = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
UpperCamelCase :Optional[Any] = 1024 if '''speech-commands''' not in model_name else 128
UpperCamelCase :int = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
if "speech-commands" in model_name:
UpperCamelCase :Dict = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
UpperCamelCase :List[Any] = dataset[0]['''audio''']['''array''']
else:
UpperCamelCase :List[Any] = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
UpperCamelCase , UpperCamelCase :Dict = torchaudio.load(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = waveform.squeeze().numpy()
UpperCamelCase :Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=16000 , return_tensors='''pt''' )
# forward pass
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCamelCase :Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCamelCase :Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCamelCase :str = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCamelCase :List[str] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCamelCase :Dict = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCamelCase :List[str] = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCamelCase :Optional[int] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCamelCase :List[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
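# Example invocation (illustrative: the script filename and output path below
# are assumptions, only the flags are defined above):
# python convert_audio_spectrogram_transformer_to_pytorch.py \
#     --model_name ast-finetuned-audioset-10-10-0.4593 \
#     --pytorch_dump_folder_path ./ast-converted \
#     --push_to_hub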
| 658 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( lowercase, lowercase, lowercase ):
"""simple docstring"""
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , ) -> List[str]:
super().__init__()
UpperCamelCase :List[str] = nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = False
UpperCamelCase :Any = nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = TaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , d_model=SCREAMING_SNAKE_CASE_ , num_heads=SCREAMING_SNAKE_CASE_ , d_kv=SCREAMING_SNAKE_CASE_ , d_ff=SCREAMING_SNAKE_CASE_ , dropout_rate=SCREAMING_SNAKE_CASE_ , feed_forward_proj=SCREAMING_SNAKE_CASE_ , is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Tuple = nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Tuple = TaBlock(SCREAMING_SNAKE_CASE_ )
self.encoders.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = TaLayerNorm(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Dict = self.token_embedder(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = encoder_input_tokens.shape[1]
UpperCamelCase :Tuple = torch.arange(SCREAMING_SNAKE_CASE_ , device=encoder_input_tokens.device )
x += self.position_encoding(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = self.dropout_pre(SCREAMING_SNAKE_CASE_ )
# invert the attention mask
UpperCamelCase :Optional[int] = encoder_input_tokens.size()
UpperCamelCase :str = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for lyr in self.encoders:
UpperCamelCase :Optional[Any] = lyr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase :List[str] = self.layer_norm(SCREAMING_SNAKE_CASE_ )
return self.dropout_post(SCREAMING_SNAKE_CASE_ ), encoder_inputs_mask
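# Usage sketch (illustrative: the obfuscated method names above are __init__
# and forward in the original file, and the argument order/values below are
# assumptions rather than documented API):
# enc = UpperCAmelCase_(2048, 1512, 768, 0.1, 12, 12, 64, 2048, "gated-gelu")
# tokens = torch.randint(0, 1512, (1, 256))        # (batch, seq_len) token ids
# mask = torch.ones(1, 256, dtype=torch.long)      # non-padding positions
# hidden, out_mask = enc(tokens, mask)             # hidden: (1, 256, d_model)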
| 658 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
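# Note (illustrative, not part of the file): with _LazyModule in place,
# `from transformers.models.llama import LlamaForCausalLM` defers the heavy
# torch-dependent import until the attribute is first accessed, keeping the
# top-level `import transformers` cheap.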
| 658 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__snake_case = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__snake_case = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
__snake_case = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__snake_case = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__snake_case = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
UpperCamelCase :Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , SCREAMING_SNAKE_CASE__ )
return [m.group(0 ) for m in matches]
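# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]; the def
# above is obfuscated to _A, but its call sites below use the original name.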
def _A ( ):
UpperCamelCase :Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCamelCase :str = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCamelCase :List[Any] = collections.defaultdict(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[Any] = collections.defaultdict(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = collections.defaultdict(SCREAMING_SNAKE_CASE__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Optional[Any] = None
if _re_tf_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
UpperCamelCase :Any = tf_models
UpperCamelCase :Optional[Any] = _re_tf_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
UpperCamelCase :Union[str, Any] = flax_models
UpperCamelCase :Optional[Any] = _re_flax_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
UpperCamelCase :List[Any] = pt_models
UpperCamelCase :Any = _re_pt_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE__ ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCamelCase :List[str] = True
break
# Try again after removing the last word in the name
UpperCamelCase :Optional[Any] = ''''''.join(camel_case_split(SCREAMING_SNAKE_CASE__ )[:-1] )
UpperCamelCase :int = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCamelCase :Any = list(SCREAMING_SNAKE_CASE__ )
all_models.sort()
UpperCamelCase :Optional[int] = {'''model_type''': all_models}
UpperCamelCase :Dict = [pt_models[t] for t in all_models]
UpperCamelCase :Optional[Any] = [tf_models[t] for t in all_models]
UpperCamelCase :Dict = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
UpperCamelCase :Union[str, Any] = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCamelCase :Any = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCamelCase :Optional[Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCamelCase :List[str] = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCamelCase :Tuple = '''AutoTokenizer'''
UpperCamelCase :List[Any] = [processors[t] for t in all_models]
return pd.DataFrame(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Dict ):
UpperCamelCase :Optional[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCamelCase :Optional[Any] = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
UpperCamelCase :List[str] = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# The type of pipeline may not exist in this framework
if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
continue
# First extract all model_names
UpperCamelCase :List[str] = []
for name in getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).values():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
model_names.append(SCREAMING_SNAKE_CASE__ )
else:
model_names.extend(list(SCREAMING_SNAKE_CASE__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
UpperCamelCase :Optional[Any] = get_frameworks_table()
UpperCamelCase :List[str] = Dataset.from_pandas(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Any = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[Any] = Dataset.from_json(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(SCREAMING_SNAKE_CASE__ ) )
}
UpperCamelCase :Union[str, Any] = update_pipeline_and_auto_class_table(SCREAMING_SNAKE_CASE__ )
# Sort the model classes so that nondeterministic ordering does not create spurious update commits.
UpperCamelCase :Dict = sorted(table.keys() )
UpperCamelCase :List[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCamelCase :Optional[Any] = Dataset.from_pandas(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCamelCase :List[str] = (
F'''Update with commit {commit_sha}\n\nSee: '''
F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
UpperCamelCase :int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' , token=SCREAMING_SNAKE_CASE__ , commit_message=SCREAMING_SNAKE_CASE__ , )
def _A ( ):
UpperCamelCase :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCamelCase :Optional[int] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCamelCase :Dict = []
for key in pipeline_tasks:
if key not in in_table:
UpperCamelCase :Union[str, Any] = pipeline_tasks[key]['''pt''']
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
UpperCamelCase :int = model[0]
UpperCamelCase :Any = model.__name__
if model not in in_table.values():
missing.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
UpperCamelCase :str = ''', '''.join(SCREAMING_SNAKE_CASE__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
__snake_case = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
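# Example invocations (illustrative token/sha values; the flags are defined above):
#   python utils/update_metadata.py --token hf_xxx --commit_sha abc1234
#   python utils/update_metadata.py --check-only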
| 658 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :Tuple = 0.0
for i, j in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
n_correct += 1.0 if math_equivalence.is_equiv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else 0.0
UpperCamelCase :int = n_correct / len(SCREAMING_SNAKE_CASE_ )
return {
"accuracy": accuracy,
}
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__snake_case = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def _A ( ):
UpperCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase :Dict = parser.parse_args()
return args.f
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]="eval" ):
UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
raise ValueError(F'''can\'t find {path}''' )
__snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_glue.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_clm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Dict = self.get_auto_remove_tmp_dir()
UpperCamelCase :Any = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_summarization_flax.main()
UpperCamelCase :str = get_results(SCREAMING_SNAKE_CASE_ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :List[str] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_mlm_flax.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :int = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_ta_mlm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
# with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU runs
UpperCamelCase :Tuple = 7 if get_gpu_count() > 1 else 2
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[int] = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_ner.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Dict = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_qa.main()
UpperCamelCase :int = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
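# These @slow tests are skipped by default; in the transformers test suite they
# are enabled via the RUN_SLOW environment variable, e.g. (illustrative path):
#   RUN_SLOW=1 python -m pytest examples/flax/test_flax_examples.py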
| 658 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def _A ( SCREAMING_SNAKE_CASE__ : Iterable[str] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Union[str, Any] = iter(SCREAMING_SNAKE_CASE__ )
while True:
UpperCamelCase :List[str] = tuple(itertools.islice(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not chunk:
return
yield chunk
def _A ( SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :Tuple = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
UpperCamelCase :Tuple = ''''''
if len(SCREAMING_SNAKE_CASE__ ) < 2:
return dirty
for i in range(len(SCREAMING_SNAKE_CASE__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(SCREAMING_SNAKE_CASE__ ) & 1:
clean += "X"
return clean
def _A ( SCREAMING_SNAKE_CASE__ : str ):
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
UpperCamelCase :List[Any] = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
UpperCamelCase :Any = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(SCREAMING_SNAKE_CASE__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(SCREAMING_SNAKE_CASE__ )
return table
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :str = generate_table(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = prepare_input(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(SCREAMING_SNAKE_CASE__ , 2 ):
UpperCamelCase , UpperCamelCase :Tuple = divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
UpperCamelCase , UpperCamelCase :List[Any] = divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :List[Any] = generate_table(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(SCREAMING_SNAKE_CASE__ , 2 ):
UpperCamelCase , UpperCamelCase :Optional[int] = divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
UpperCamelCase , UpperCamelCase :Tuple = divmod(table.index(SCREAMING_SNAKE_CASE__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
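# Usage sketch: the last two functions above are the Playfair encoder and
# decoder (their defs are obfuscated to _A, so the names below are the
# originals implied by the cipher, not callable as written):
# ciphertext = encode("Hide the gold in the tree stump", "playfair example")
# assert decode(ciphertext, "playfair example").startswith("HIDETHEGOLD")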
| 658 |
from __future__ import annotations
from collections.abc import Callable
def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ):
UpperCamelCase :Optional[Any] = x_start
UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = 0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCamelCase :Any = (x_end - x_start) / steps + xa
UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCamelCase :Optional[int] = xa
UpperCamelCase :List[str] = fxa
return area
if __name__ == "__main__":
def _A ( SCREAMING_SNAKE_CASE__ : int ):
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
__snake_case = 10
while i <= 10_00_00:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
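# Analytic check: the per-segment abs() makes the sum converge to the integral
# of |x^3 + x^2| over [-5, 5], which is 1376/12 + 198, about 312.67, so the
# printed estimates should approach that value as the step count grows.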
| 658 | 1 |
from copy import deepcopy
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ) -> None:
if arr is None and size is not None:
UpperCamelCase :int = size
UpperCamelCase :str = [0] * size
elif arr is not None:
self.init(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError('''Either arr or size must be specified''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase :Dict = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = deepcopy(SCREAMING_SNAKE_CASE_ )
for i in range(1 , self.size ):
UpperCamelCase :Union[str, Any] = self.next_(SCREAMING_SNAKE_CASE_ )
if j < self.size:
self.tree[j] += self.tree[i]
def UpperCAmelCase ( self ) -> list[int]:
UpperCamelCase :str = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCamelCase :int = self.next_(SCREAMING_SNAKE_CASE_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ) -> int:
return index + (index & (-index))
@staticmethod
def UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ) -> int:
return index - (index & (-index))
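# lowbit trick used by both helpers: index & -index isolates the lowest set
# bit, e.g. next_(6) = 6 + (6 & -6) = 8 and prev(6) = 6 - (6 & -6) = 4.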
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCamelCase :Any = self.next_(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
self.add(SCREAMING_SNAKE_CASE_ , value - self.get(SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
if right == 0:
return 0
UpperCamelCase :Dict = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCamelCase :str = self.prev(SCREAMING_SNAKE_CASE_ )
return result
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
return self.prefix(SCREAMING_SNAKE_CASE_ ) - self.prefix(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
return self.query(SCREAMING_SNAKE_CASE_ , index + 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
value -= self.tree[0]
if value < 0:
return -1
UpperCamelCase :Dict = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCamelCase :Dict = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
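# Usage sketch (illustrative values; the class name is the obfuscated one above):
# f = UpperCAmelCase_(arr=[1, 2, 3, 4, 5])
# f.prefix(3)    # -> 6, sum over indices [0, 3)
# f.add(1, 10)   # logical array becomes [1, 12, 3, 4, 5]
# f.query(1, 4)  # -> 19, sum over indices [1, 4)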
| 658 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(CMStochasticIterativeScheduler,)
UpperCamelCase_ : Any =10
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = 10
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Dict = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps[0]
UpperCamelCase :Union[str, Any] = scheduler.timesteps[1]
UpperCamelCase :str = self.dummy_sample
UpperCamelCase :List[str] = 0.1 * sample
UpperCamelCase :List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :str = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ) -> List[str]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[Any] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps
UpperCamelCase :Union[str, Any] = torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = self.dummy_model()
UpperCamelCase :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase :List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = pred_prev_sample
UpperCamelCase :Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Dict = self.scheduler_classes[0]
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = scheduler.timesteps
UpperCamelCase :int = torch.manual_seed(0 )
UpperCamelCase :str = self.dummy_model()
UpperCamelCase :List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase :List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :int = pred_prev_sample
UpperCamelCase :Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :Tuple = self.get_scheduler_config()
UpperCamelCase :List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = [39, 30, 12, 1, 0]
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[int] = self.scheduler_classes[0]
UpperCamelCase :List[str] = self.get_scheduler_config()
UpperCamelCase :Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
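# Background note (illustrative summary, not part of the test file): the
# consistency-model scheduler maps its discrete timesteps onto a Karras sigma
# schedule between sigma_min and sigma_max, which is why set_timesteps accepts
# either num_inference_steps or an explicit, strictly descending timestep list.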
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return None
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return None
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any =[
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> int:
from transformers import BertModel
UpperCamelCase :int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
vocab_file.flush()
UpperCamelCase :Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase :Union[str, Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ )
@require_tf
@slow
def UpperCAmelCase ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :Tuple = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = quantize(Path(SCREAMING_SNAKE_CASE_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :str = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = quantize(SCREAMING_SNAKE_CASE_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase :Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return path
except Exception as e:
self.fail(e )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[str]:
from transformers import BertModel
UpperCamelCase :List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[Any]:
from transformers import TFBertModel
UpperCamelCase :Optional[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Tuple = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
# Parameters should be reordered according to their respective positions in the function signature:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly one arg (everything before "some_other_args", which was not provided)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :str = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
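# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite): exporting a checkpoint to
# ONNX and quantizing it with the same helpers the tests above exercise. The
# model name and output path are illustrative placeholders; `convert` and
# `quantize` come from the imports at the top of this file.
if __name__ == "__main__":
    onnx_path = Path("onnx/bert-base-cased.onnx")
    convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)
    quantized_path = quantize(onnx_path)  # returns the path of the quantized file
    print(f"Quantized model written to {quantized_path}")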
| 658 | 1 |
from __future__ import annotations
__snake_case = """#"""
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> None:
UpperCamelCase :dict = {}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase :Optional[Any] = self._trie
for char in text:
if char not in trie:
UpperCamelCase :Dict = {}
UpperCamelCase :int = trie[char]
UpperCamelCase :List[str] = True
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> tuple | list:
UpperCamelCase :Union[str, Any] = self._trie
for char in prefix:
if char in trie:
UpperCamelCase :Tuple = trie[char]
else:
return []
return self._elements(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> tuple:
UpperCamelCase :List[Any] = []
for c, v in d.items():
UpperCamelCase :Optional[int] = [''' '''] if c == END else [(c + s) for s in self._elements(v )]
result.extend(SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
__snake_case = Trie()
__snake_case = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def _A ( SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :int = trie.find_word(SCREAMING_SNAKE_CASE__ )
return tuple(string + word for word in suffixes )
def _A ( ):
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
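# Self-contained sketch of the trie autocomplete above with the obfuscated
# assignments restored (assumption: `_trie` is the backing dict and END marks
# complete words). Illustrative only, independent of the class definition.
END = "#"

def insert_word(trie: dict, word: str) -> None:
    node = trie
    for char in word:
        node = node.setdefault(char, {})  # descend, creating nodes as needed
    node[END] = True  # mark the end of a complete word

def find_words(trie: dict, prefix: str) -> list:
    node = trie
    for char in prefix:
        if char not in node:
            return []
        node = node[char]
    # depth-first walk collecting every suffix below the prefix node
    results, stack = [], [("", node)]
    while stack:
        suffix, current = stack.pop()
        for char, child in current.items():
            if char == END:
                results.append(prefix + suffix)
            else:
                stack.append((suffix + char, child))
    return results

root: dict = {}
for w in ("depart", "detergent", "daring", "dog", "deer", "deal"):
    insert_word(root, w)
print(sorted(find_words(root, "de")))  # ['deal', 'deer', 'depart', 'detergent']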
| 658 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Union[str, Any] = tempfile.mkdtemp()
UpperCamelCase :List[str] = 5
# Realm tok
UpperCamelCase :List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase :Dict = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase :Any = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Tuple = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=SCREAMING_SNAKE_CASE_ , )
return block_records
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[Any] = self.get_config()
UpperCamelCase :str = self.get_dummy_retriever()
UpperCamelCase :int = retriever.tokenizer
UpperCamelCase :Optional[Any] = np.array([0, 3] , dtype='''long''' )
UpperCamelCase :Optional[Any] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Tuple = tokenizer(
['''the fourth'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Optional[Any] = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = self.get_config()
UpperCamelCase :Union[str, Any] = self.get_dummy_retriever()
UpperCamelCase :Dict = retriever.tokenizer
UpperCamelCase :str = np.array([0, 3, 5] , dtype='''long''' )
UpperCamelCase :List[str] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Optional[Any] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Any = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :str = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
UpperCamelCase :List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
UpperCamelCase :Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCamelCase :List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
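# Hedged usage sketch mirroring the tests above: retrieve evidence blocks for
# a question and locate answer spans with a published checkpoint. The
# checkpoint id is the one referenced in the remote-path test; the call
# signature follows the retriever invocations above and should be treated as
# an assumption.
if __name__ == "__main__":
    retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
    retrieved_block_ids = np.array([0, 3], dtype="long")
    question_ids = retriever.tokenizer(["Test question"]).input_ids
    answer_ids = retriever.tokenizer(
        ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
    ).input_ids
    has_answers, start_pos, end_pos, concat_inputs = retriever(
        retrieved_block_ids, question_ids, answer_ids=answer_ids, max_length=32, return_tensors="np"
    )
    print(has_answers, concat_inputs.input_ids.shape)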
| 658 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
UpperCamelCase :Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('''head''' ):
UpperCamelCase :int = '''segformer.encoder.''' + key
if key.startswith('''backbone''' ):
UpperCamelCase :Tuple = key.replace('''backbone''' , '''segformer.encoder''' )
if "patch_embed" in key:
# replace, for example, patch_embed1 with patch_embeddings.0
UpperCamelCase :List[str] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
UpperCamelCase :List[str] = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(SCREAMING_SNAKE_CASE__ )-1}''' )
if "norm" in key:
UpperCamelCase :int = key.replace('''norm''' , '''layer_norm''' )
if "segformer.encoder.layer_norm" in key:
# replace, for example, layer_norm1 with layer_norm.0
UpperCamelCase :str = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
UpperCamelCase :Union[str, Any] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(SCREAMING_SNAKE_CASE__ )-1}''' )
if "layer_norm1" in key:
UpperCamelCase :Union[str, Any] = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
UpperCamelCase :Dict = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace, for example, block1 with block.0
UpperCamelCase :List[str] = key[key.find('''block''' ) + len('''block''' )]
UpperCamelCase :str = key.replace(F'''block{idx}''' , F'''block.{int(SCREAMING_SNAKE_CASE__ )-1}''' )
if "attn.q" in key:
UpperCamelCase :Any = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
UpperCamelCase :List[str] = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
UpperCamelCase :Optional[int] = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
UpperCamelCase :Dict = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
UpperCamelCase :str = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
UpperCamelCase :List[Any] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
UpperCamelCase :Optional[Any] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
UpperCamelCase :Optional[Any] = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace, for example, linear_c4 with linear_c.3
UpperCamelCase :Optional[int] = key[key.find('''linear_c''' ) + len('''linear_c''' )]
UpperCamelCase :Dict = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(SCREAMING_SNAKE_CASE__ )-1}''' )
if key.startswith('''head''' ):
UpperCamelCase :str = key.replace('''head''' , '''classifier''' )
UpperCamelCase :List[Any] = value
return new_state_dict
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase :Optional[int] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
UpperCamelCase :str = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
UpperCamelCase :List[str] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase :List[Any] = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase :List[str] = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase :List[Any] = kv_bias[
config.hidden_sizes[i] :
]
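# Hedged numeric illustration of the split above: the original checkpoint
# fuses the key and value projections into a single (2*H, H) "kv" matrix,
# which is sliced into separate (H, H) key and value weights.
_hidden = 4
_kv_weight = torch.randn(2 * _hidden, _hidden)
_key_weight, _value_weight = _kv_weight[:_hidden, :], _kv_weight[_hidden:, :]
assert _key_weight.shape == _value_weight.shape == (_hidden, _hidden)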
def _A ( ):
UpperCamelCase :str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase :Optional[int] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ):
UpperCamelCase :Optional[Any] = SegformerConfig()
UpperCamelCase :Optional[Any] = False
# set attributes based on model_name
UpperCamelCase :List[str] = '''huggingface/label-files'''
if "segformer" in model_name:
UpperCamelCase :Any = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
UpperCamelCase :Tuple = 150
UpperCamelCase :Any = '''ade20k-id2label.json'''
UpperCamelCase :Dict = (1, 150, 128, 128)
elif "city" in model_name:
UpperCamelCase :Union[str, Any] = 19
UpperCamelCase :str = '''cityscapes-id2label.json'''
UpperCamelCase :Optional[int] = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
UpperCamelCase :Optional[int] = True
UpperCamelCase :Optional[int] = model_name[4:6]
UpperCamelCase :List[str] = 1000
UpperCamelCase :List[str] = '''imagenet-1k-id2label.json'''
UpperCamelCase :Union[str, Any] = (1, 1000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
UpperCamelCase :Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase :List[str] = {int(k ): v for k, v in idalabel.items()}
UpperCamelCase :Optional[int] = idalabel
UpperCamelCase :List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
UpperCamelCase :Optional[int] = [64, 128, 320, 512]
UpperCamelCase :int = 256
elif size == "b2":
UpperCamelCase :str = [64, 128, 320, 512]
UpperCamelCase :str = 768
UpperCamelCase :Tuple = [3, 4, 6, 3]
elif size == "b3":
UpperCamelCase :List[Any] = [64, 128, 320, 512]
UpperCamelCase :int = 768
UpperCamelCase :List[str] = [3, 4, 18, 3]
elif size == "b4":
UpperCamelCase :Optional[int] = [64, 128, 320, 512]
UpperCamelCase :str = 768
UpperCamelCase :int = [3, 8, 27, 3]
elif size == "b5":
UpperCamelCase :Optional[Any] = [64, 128, 320, 512]
UpperCamelCase :List[str] = 768
UpperCamelCase :Optional[Any] = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
UpperCamelCase :Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE__ , align=SCREAMING_SNAKE_CASE__ , do_random_crop=SCREAMING_SNAKE_CASE__ )
# prepare image
UpperCamelCase :Optional[int] = prepare_img()
UpperCamelCase :str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
UpperCamelCase :List[str] = torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('''cpu''' ) )
else:
UpperCamelCase :Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('''cpu''' ) )['''state_dict''']
# rename keys
UpperCamelCase :Tuple = rename_keys(SCREAMING_SNAKE_CASE__ , encoder_only=SCREAMING_SNAKE_CASE__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
if encoder_only:
UpperCamelCase :str = False
UpperCamelCase :Any = SegformerForImageClassification(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :int = SegformerForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# forward pass
UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
UpperCamelCase :Union[str, Any] = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
UpperCamelCase :Tuple = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
UpperCamelCase :Optional[int] = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
UpperCamelCase :Any = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
UpperCamelCase :Dict = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
UpperCamelCase :int = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
UpperCamelCase :List[Any] = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
UpperCamelCase :int = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
UpperCamelCase :Optional[int] = torch.tensor(
[
[
[-1.1_3_7_2e0_1, -1.2_7_8_7e0_1, -1.3_4_7_7e0_1],
[-1.2_5_3_6e0_1, -1.4_1_9_4e0_1, -1.4_4_0_9e0_1],
[-1.3_2_1_7e0_1, -1.4_8_8_8e0_1, -1.5_3_2_7e0_1],
],
[
[-1.4_7_9_1e0_1, -1.7_1_2_2e0_1, -1.8_2_7_7e0_1],
[-1.7_1_6_3e0_1, -1.9_1_9_2e0_1, -1.9_5_3_3e0_1],
[-1.7_8_9_7e0_1, -1.9_9_9_1e0_1, -2.0_3_1_5e0_1],
],
[
[7.6_7_2_3e-0_1, 4.1_9_2_1e-0_1, -7.7_8_7_8e-0_2],
[4.7_7_7_2e-0_1, 9.5_5_5_7e-0_3, -2.8_0_8_2e-0_1],
[3.6_0_3_2e-0_1, -2.4_8_2_6e-0_1, -5.1_1_6_8e-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
UpperCamelCase :Optional[int] = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
UpperCamelCase :str = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
UpperCamelCase :int = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
UpperCamelCase :int = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
UpperCamelCase :Dict = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
UpperCamelCase :Union[str, Any] = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
UpperCamelCase :Dict = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
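# Hedged invocation sketch (file and folder names are illustrative
# placeholders; the script file name is an assumption):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade-512-512
#
# The dumped folder can then be loaded back with
# SegformerForSemanticSegmentation.from_pretrained and
# SegformerImageProcessor.from_pretrained.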
| 658 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]:
UpperCamelCase :int = parent
UpperCamelCase :List[Any] = batch_size
UpperCamelCase :List[Any] = patch_size
UpperCamelCase :Optional[int] = max_length
UpperCamelCase :Union[str, Any] = num_mel_bins
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :Dict = use_labels
UpperCamelCase :Dict = hidden_size
UpperCamelCase :Optional[int] = num_hidden_layers
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Optional[int] = intermediate_size
UpperCamelCase :List[str] = hidden_act
UpperCamelCase :List[str] = hidden_dropout_prob
UpperCamelCase :List[Any] = attention_probs_dropout_prob
UpperCamelCase :str = type_sequence_label_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :Union[str, Any] = scope
UpperCamelCase :List[Any] = frequency_stride
UpperCamelCase :Tuple = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension
UpperCamelCase :Optional[int] = num_patches + 2
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :str = self.get_config()
return config, input_values, labels
def UpperCAmelCase ( self ) -> List[Any]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Any =(
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Dict =False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = ASTModelTester(self )
UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Any = [*signature.parameters.keys()]
UpperCamelCase :Optional[int] = ['''input_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Any = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Tuple:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = self.default_feature_extractor
UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = self.default_feature_extractor
UpperCamelCase , UpperCamelCase :Dict = prepare_audio()
UpperCamelCase :Dict = audio.squeeze().numpy()
UpperCamelCase :int = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase :List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
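# Hedged end-to-end sketch mirroring the integration test above: classify an
# audio clip with the pretrained AST checkpoint. The local audio path is an
# illustrative placeholder.
if __name__ == "__main__":
    extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    audio_model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    waveform, sampling_rate = torchaudio.load("sample_audio.flac")  # placeholder path
    inputs = extractor(waveform.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = audio_model(**inputs).logits
    predicted_id = logits.argmax(-1).item()
    print(audio_model.config.id2label[predicted_id])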
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(SCREAMING_SNAKE_CASE__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("""doctest""").testmod()
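# Hedged doctest-style illustration: the function above returns one variant
# per alphabetic position, with that position upper-cased.
# >>> _A("abc")
# ['Abc', 'aBc', 'abC']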
| 658 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
UpperCamelCase :Dict = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase :Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
UpperCamelCase :int = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_55.0
UpperCamelCase :List[str] = image.transpose(0 , 3 , 1 , 2 )
UpperCamelCase :Tuple = 2.0 * image - 1.0
UpperCamelCase :Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
return image
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0.99_95 ):
if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
UpperCamelCase :int = True
UpperCamelCase :Dict = va.device
UpperCamelCase :List[Any] = va.cpu().numpy()
UpperCamelCase :str = va.cpu().numpy()
UpperCamelCase :Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) )
if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD:
UpperCamelCase :Any = (1 - t) * va + t * va
else:
UpperCamelCase :Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = theta_a * t
UpperCamelCase :str = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCamelCase :List[Any] = sin_theta_t / sin_theta_a
UpperCamelCase :Union[str, Any] = sa * va + sa * va
if inputs_are_torch:
UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return va
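# Hedged numeric sketch of the slerp above (intended signature
# slerp(t, v0, v1) once the obfuscated names are restored): at t=0.5,
# orthogonal unit vectors interpolate onto the 45-degree bisector.
def _slerp_reference(t, va, vb):
    dot = np.sum(va * vb) / (np.linalg.norm(va) * np.linalg.norm(vb))
    if np.abs(dot) > 0.9995:  # nearly parallel: fall back to plain lerp
        return (1 - t) * va + t * vb
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)


assert np.allclose(
    _slerp_reference(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0])),
    np.array([np.sin(np.pi / 4)] * 2),
)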
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
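# Hedged numeric check of the spherical distance above: with normalized
# inputs the value is 2 * arcsin(||x - y|| / 2) ** 2, so orthogonal unit
# vectors (whose difference has norm sqrt(2)) give 2 * (pi / 4) ** 2.
import math

assert torch.allclose(
    2 * torch.asin(torch.tensor([2.0]).sqrt() / 2) ** 2,
    torch.tensor([2 * (math.pi / 4) ** 2]),
)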
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ):
for param in model.parameters():
UpperCamelCase :Any = value
class UpperCAmelCase_ ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> str:
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , clip_model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , coca_model=SCREAMING_SNAKE_CASE_ , coca_tokenizer=SCREAMING_SNAKE_CASE_ , coca_transform=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Union[str, Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE_ )
else feature_extractor.size['''shortest_edge''']
)
UpperCamelCase :Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE_ )
set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase :Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
# get the original timestep using init_timestep
UpperCamelCase :Union[str, Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase :Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
UpperCamelCase :Tuple = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ )
]
UpperCamelCase :List[str] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
else:
UpperCamelCase :Any = self.vae.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[str] = 0.1_8215 * init_latents
UpperCamelCase :Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase :List[Any] = randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
UpperCamelCase :Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = init_latents
return latents
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase :Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase :List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase :str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase :int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
UpperCamelCase :List[str] = latents.detach().requires_grad_()
UpperCamelCase :List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCamelCase :int = torch.sqrt(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :str = self.scheduler.sigmas[index]
UpperCamelCase :Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :int = 1 / 0.1_8215 * sample
UpperCamelCase :List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :List[str] = transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = self.normalize(SCREAMING_SNAKE_CASE_ ).to(latents.dtype )
UpperCamelCase :List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() * clip_guidance_scale
UpperCamelCase :Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = latents.detach() + grads * (sigma**2)
UpperCamelCase :Optional[Any] = noise_pred_original
else:
UpperCamelCase :List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.6 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0.8 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Generator ) and batch_size > 1:
UpperCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1)
UpperCamelCase :Tuple = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCamelCase :Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase :Dict = ''', '''.join(SCREAMING_SNAKE_CASE_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :Any = self.get_image_description(SCREAMING_SNAKE_CASE_ )
if style_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :str = self.get_image_description(SCREAMING_SNAKE_CASE_ )
# get prompt text embeddings for content and style
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :Dict = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase :Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# set timesteps
UpperCamelCase :str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase :List[str] = {}
if accepts_offset:
UpperCamelCase :Tuple = 1
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCamelCase , UpperCamelCase :Tuple = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
UpperCamelCase :Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
# Preprocess image
UpperCamelCase :Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if clip_guidance_scale > 0:
UpperCamelCase :Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = slerp(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase :Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase :Any = content_text_input.input_ids.shape[-1]
UpperCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCamelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase :Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase :str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase :int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase :int = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase :str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase :Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase :Dict = {}
if accepts_eta:
UpperCamelCase :int = eta
# check if the scheduler accepts generator
UpperCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase :List[str] = generator
with self.progress_bar(total=SCREAMING_SNAKE_CASE_ ):
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase :Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase :Any = noise_pred.chunk(2 )
UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase :int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase , UpperCamelCase :str = self.cond_fn(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase :List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[Any] = 1 / 0.1_8215 * latents
UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase :List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
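# Minimal numeric illustration of the classifier-free guidance step in the
# denoising loop above: noise = uncond + guidance_scale * (text - uncond).
_uncond, _text = torch.zeros(4), torch.ones(4)
_guided = _uncond + 7.5 * (_text - _uncond)
assert torch.equal(_guided, torch.full((4,), 7.5))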
| 658 | 1 |
import argparse
import os
import jax.numpy as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__snake_case = """base_with_context"""
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
UpperCamelCase :List[Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCamelCase :Union[str, Any] = weights[F'''layers_{lyr_num}''']
UpperCamelCase :Dict = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase :Tuple = ly_weight['''attention''']
UpperCamelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase :int = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase :int = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder( weights , model ):
UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
UpperCamelCase :Tuple = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[F'''layers_{lyr_num}''']
attention_weights = ly_weight['''attention''']
UpperCamelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase :List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase :str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase :Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder( weights , model ):
UpperCamelCase :int = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
UpperCamelCase :int = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
UpperCamelCase :Any = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
ly_weight = weights[F'''layers_{lyr_num}''']
UpperCamelCase :Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase :List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
attention_weights = ly_weight['''self_attention''']
UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
UpperCamelCase :Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
UpperCamelCase :Dict = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
UpperCamelCase :Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
UpperCamelCase :Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main( args ):
t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
t5_checkpoint = jnp.tree_util.tree_map(onp.array , t5_checkpoint )
gin_overrides = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
notes_encoder = load_notes_encoder(t5_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
continuous_encoder = load_continuous_encoder(t5_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
decoder = load_decoder(t5_checkpoint['''target''']['''decoder'''] , decoder )
melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
pipe = SpectrogramDiffusionPipeline(
notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
args = parser.parse_args()
main(args)
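# Example invocation (illustrative; the script filename and checkpoint path are placeholders):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 --output_path ./converted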
| 658 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int] , max_sum: int ) -> list[list[int]]:
result: list[list[int]] = []
path: list[int] = []
num_index = 0
remaining_nums_sum = sum(nums )
create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
return result
def create_state_space_tree(nums: list[int] , max_sum: int , num_index: int , path: list[int] , result: list[list[int]] , remaining_nums_sum: int , ) -> None:
if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
return
if sum(path ) == max_sum:
result.append(path )
return
for index in range(num_index , len(nums ) ):
create_state_space_tree(
nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
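# Expected output (hand-checked): the two subsets of [3, 34, 4, 12, 5, 2] that sum to 9,
# printed as [3, 4, 2] [4, 5]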
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class Swin2SRConfig( PretrainedConfig ):
"""simple docstring"""
model_type ='swin2sr'
attribute_map ={
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ) -> Tuple:
super().__init__(**kwargs )
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.upscale = upscale
self.img_range = img_range
self.resi_connection = resi_connection
self.upsampler = upsampler
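# Minimal usage sketch (assumes the transformers package layout this file lives in):
#   config = Swin2SRConfig(upscale=4)
#   config.embed_dim  # -> 180, the default embedding dimension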
| 658 |
def check_bouncy(num: int ) -> bool:
if not isinstance(num , int ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
str_num = str(num )
sorted_str_num = ''''''.join(sorted(str_num ) )
return sorted_str_num != str_num and sorted_str_num[::-1] != str_num
def solution(percent: float = 99 ) -> int:
if not 0 < percent < 100:
raise ValueError('''solution() only accepts values from 0 to 100''' )
bouncy_num = 0
num = 1
while True:
if check_bouncy(num ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
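# Worked examples (by hand): 134468 has increasing digits and 66420 has decreasing
# digits, so neither is bouncy; 155349 is neither sorted nor reverse-sorted, so
# check_bouncy(155349) returns True.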
| 658 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ) -> Optional[Any]:
self.parent = parent
self.batch_size = batch_size
self.patch_size = patch_size
self.max_length = max_length
self.num_mel_bins = num_mel_bins
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.frequency_stride = frequency_stride
self.time_stride = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
num_patches = frequency_out_dimension * time_out_dimension
self.seq_length = num_patches + 2
def prepare_config_and_inputs( self ) -> Any:
input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, input_values, labels
def get_config( self ) -> List[Any]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def create_and_check_model( self , config , input_values , labels ) -> Optional[int]:
model = ASTModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def prepare_config_and_inputs_for_common( self ) -> Dict:
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, labels = config_and_inputs
inputs_dict = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping =(
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Dict =False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> List[str]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def setUp( self ) -> Dict:
self.model_tester = ASTModelTester(self )
self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
def test_config( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def test_inputs_embeds( self ) -> str:
pass
def test_model_common_attributes( self ) -> int:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self ) -> Tuple:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''input_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self ) -> List[Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> Optional[int]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ASTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_audio():
filepath = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
audio, sampling_rate = torchaudio.load(filepath )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_feature_extractor( self ) -> Tuple:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def test_inference_audio_classification( self ) -> str:
feature_extractor = self.default_feature_extractor
model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
audio, sampling_rate = prepare_audio()
audio = audio.squeeze().numpy()
inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 658 |
def hex_to_bin(hex_num: str ) -> int:
hex_num = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
is_negative = hex_num[0] == '''-'''
if is_negative:
hex_num = hex_num[1:]
try:
int_num = int(hex_num , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
bin_str = ''''''
while int_num > 0:
bin_str = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
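# Worked example (by hand): hex "AC" is 172 decimal, i.e. 0b10101100, so
# hex_to_bin("AC") returns the integer 10101100 and hex_to_bin("-AC") returns -10101100.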
| 658 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
x1 = x_start
fx1 = fnc(x_start )
area = 0.0
for _ in range(steps ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
x2 = (x_end - x_start) / steps + x1
fx2 = fnc(x2 )
area += abs(fx2 + fx1 ) * (x2 - x1) / 2
# Increment step
x1 = x2
fx1 = fx2
return area
if __name__ == "__main__":
def f(x: float ) -> float:
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
i = 10
while i <= 10_00_00:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
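# As the step count grows the estimate approaches the exact unsigned area (hand-computed):
# f is negative on [-5, -1], so the area is 1376/12 + 2376/12 = 312.66...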
| 658 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
y, x = position
positions = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
permissible_positions = []
for position in positions:
y_test, x_test = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(position )
return permissible_positions
def is_complete(board: list[list[int]] ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
if is_complete(board ):
return True
for position in get_valid_pos(pos , len(board ) ):
y, x = position
if board[y][x] == 0:
board[y][x] = curr + 1
if open_knight_tour_helper(board , position , curr + 1 ):
return True
board[y][x] = 0
return False
def open_knight_tour(n: int ) -> list[list[int]]:
board = [[0 for i in range(n )] for j in range(n )]
for i in range(n ):
for j in range(n ):
board[i][j] = 1
if open_knight_tour_helper(board , (i, j) , 1 ):
return board
board[i][j] = 0
msg = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
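# Examples: open_knight_tour(1) returns [[1]]; the 4x4 board is known to admit no open
# knight's tour, so open_knight_tour(4) raises ValueError.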
| 658 | 1 |
def combination_sum_iv(n: int , array: list[int] , target: int ) -> int:
def count_of_possible_combinations(target: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(target )
def combination_sum_iv_dp_array(n: int , array: list[int] , target: int ) -> int:
def count_of_possible_combinations_with_dp_array(
target: int , dp_array: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
answer = sum(
count_of_possible_combinations_with_dp_array(target - item , dp_array )
for item in array )
dp_array[target] = answer
return answer
dp_array = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int , array: list[int] , target: int ) -> int:
dp_array = [0] * (target + 1)
dp_array[0] = 1
for i in range(1 , target + 1 ):
for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
n = 3
target = 5
array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
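# Prints 9 (hand-counted): 5 = 1+1+1+1+1, the four orderings of 2+1+1+1,
# the three orderings of 2+2+1, and 5 itself.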
| 658 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def test_save_load_config( self , config_name ) -> List[str]:
config = GenerationConfig(
do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir , config_name=config_name )
loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , True )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , None )
def test_from_model_config( self ) -> str:
model_config = AutoConfig.from_pretrained('''gpt2''' )
generation_config_from_model = GenerationConfig.from_model_config(model_config )
default_generation_config = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(generation_config_from_model , default_generation_config )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def test_update( self ) -> Dict:
generation_config = GenerationConfig()
update_kwargs = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
update_kwargs_copy = copy.deepcopy(update_kwargs )
unused_kwargs = generation_config.update(**update_kwargs )
# update_kwargs was not modified (no side effects)
self.assertEqual(update_kwargs , update_kwargs_copy )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(unused_kwargs , {'''foo''': '''bar'''} )
def test_initialize_new_kwargs( self ) -> Optional[Any]:
generation_config = GenerationConfig()
generation_config.foo = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(tmp_dir )
new_config = GenerationConfig.from_pretrained(tmp_dir )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
generation_config = GenerationConfig.from_model_config(new_config )
assert not hasattr(generation_config , '''foo''' ) # no new kwargs should be initialized if from config
def test_kwarg_init( self ) -> Any:
default_config = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , False )
self.assertEqual(default_config.num_beams , 1 )
config = GenerationConfig(
do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , True )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir )
loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , True )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def setUpClass( cls ) -> Optional[Any]:
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def tearDownClass( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def test_push_to_hub( self ) -> Any:
config = GenerationConfig(
do_sample=True , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
new_config = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
tmp_dir , repo_id='''test-generation-config''' , push_to_hub=True , use_auth_token=self._token )
new_config = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v , getattr(new_config , k ) )
def test_push_to_hub_in_organization( self ) -> Union[str, Any]:
config = GenerationConfig(
do_sample=True , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
new_config = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
tmp_dir , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=True , use_auth_token=self._token )
new_config = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v , getattr(new_config , k ) )
| 658 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.02 , use_labels=True , scope=None , ) -> Tuple:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.use_labels = use_labels
self.scope = scope
def prepare_config_and_inputs( self ) -> Optional[Any]:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.get_config()
return config, input_ids, input_mask, token_labels
def get_config( self ) -> Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def prepare_config_and_inputs_for_decoder( self ) -> Union[str, Any]:
config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs ) -> Union[str, Any]:
model = BertGenerationEncoder(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ) -> str:
config.add_cross_attention = True
model = BertGenerationEncoder(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
result = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs ) -> Any:
config.is_decoder = True
config.add_cross_attention = True
model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
# first forward pass
outputs = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
output_from_no_past = model(
next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
output_from_past = model(
next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args , ) -> int:
model = BertGenerationDecoder(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def prepare_config_and_inputs_for_common( self ) -> Tuple:
config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes =(BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
all_generative_model_classes =(BertGenerationDecoder,) if is_torch_available() else ()
pipeline_model_mapping =(
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def setUp( self ) -> Any:
self.model_tester = BertGenerationEncoderTester(self )
self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37 )
def test_config( self ) -> Tuple:
self.config_tester.run_common_tests()
def test_model( self ) -> int:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_as_bert( self ) -> List[str]:
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
config.model_type = '''bert'''
self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
def test_model_as_decoder( self ) -> Tuple:
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
def test_decoder_model_past_with_large_inputs( self ) -> str:
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
def test_model_as_decoder_with_default_input_mask( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def test_for_causal_lm( self ) -> int:
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> str:
model = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(model )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_no_head_absolute_embedding( self ) -> List[str]:
model = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
output = model(input_ids )[0]
expected_shape = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_no_head( self ) -> List[Any]:
model = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
output = model(input_ids )[0]
expected_shape = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 658 |
def hexagonal_numbers(length: int ) -> list[int]:
if length <= 0 or not isinstance(length , int ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
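# Expected output (hand-checked from n * (2n - 1)):
#   [0, 1, 6, 15, 28]
#   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]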
| 658 | 1 |
from __future__ import annotations
def prime_sieve(limit: int ) -> list[int]:
is_prime = [True] * limit
is_prime[0] = False
is_prime[1] = False
is_prime[2] = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
index = i * 2
while index < limit:
is_prime[index] = False
index = index + i
primes = [2]
for i in range(3 , limit , 2 ):
if is_prime[i]:
primes.append(i )
return primes
def solution(ceiling: int = 1000000 ) -> int:
primes = prime_sieve(ceiling )
length = 0
largest = 0
for i in range(len(primes ) ):
for j in range(i + length , len(primes ) ):
sol = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
length = j - i
largest = sol
return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
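# Expected output: solution() = 997651, the published Project Euler 50 answer
# (a 543-term consecutive-prime sum below one million).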
| 658 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
__snake_case = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool( PipelineTool ):
"""simple docstring"""
default_checkpoint ='facebook/nllb-200-distilled-600M'
description =(
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
name ='translator'
pre_processor_class =AutoTokenizer
model_class =AutoModelForSeq2SeqLM
lang_to_code =LANGUAGE_CODES
inputs =['text', 'text', 'text']
outputs =['text']
def encode( self , text , src_lang , tgt_lang ) -> str:
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
src_lang = self.lang_to_code[src_lang]
tgt_lang = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
text , return_tensors='''pt''' , src_lang=src_lang , tgt_lang=tgt_lang )
def forward( self , inputs ) -> Optional[Any]:
return self.model.generate(**inputs )
def decode( self , outputs ) -> int:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
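# Usage sketch (illustrative; assumes the surrounding agents tooling is installed):
#   tool = TranslationTool()
#   tool('''Bonjour, comment allez-vous ?''' , src_lang='''French''' , tgt_lang='''English''' )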
| 658 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
__snake_case = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
__snake_case = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
__snake_case = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines how multiple output values are aggregated. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def UpperCAmelCase ( self ) -> Optional[int]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="uniform_average" , SCREAMING_SNAKE_CASE_=True ) -> int:
UpperCamelCase :List[Any] = mean_squared_error(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sample_weight=SCREAMING_SNAKE_CASE_ , multioutput=SCREAMING_SNAKE_CASE_ , squared=SCREAMING_SNAKE_CASE_ )
return {"mse": mse}
| 658 |
from __future__ import annotations
# Subarrays shorter than this threshold are searched linearly. The value can be
# altered, but it is recommended to keep it greater than or equal to 10.
__snake_case = 10
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
for i in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if array[i] == target:
return i
return -1
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Tuple = 0
UpperCamelCase :Dict = len(SCREAMING_SNAKE_CASE__ )
while left <= right:
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = (left + right) // 3 + 1
UpperCamelCase :str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
UpperCamelCase :int = one_third - 1
elif array[two_third] < target:
UpperCamelCase :Any = two_third + 1
else:
UpperCamelCase :Any = one_third + 1
UpperCamelCase :int = two_third - 1
else:
return -1
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
if left < right:
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = (left + right) // 3 + 1
UpperCamelCase :Tuple = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(SCREAMING_SNAKE_CASE__ , one_third - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return -1
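# Worked example (illustrative): with collection = [1, 3, 5, 7, 9, 11] and
# target = 7, both variants return index 3; a missing target yields -1. Since
# len(collection) < precision, both fall straight back to lin_search. Note that
# lin_search scans the half-open range [left, right), so the recursive variant,
# seeded with right = len(collection) - 1, cannot reach the last element
# through that fallback.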
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input("""Enter numbers separated by comma:\n""").strip()
__snake_case = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__snake_case = int(input("""Enter the number to be found in the list:\n""").strip())
__snake_case = ite_ternary_search(collection, target)
__snake_case = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 658 | 1 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :list[list[int]] = []
UpperCamelCase :list[int] = []
UpperCamelCase :List[str] = 0
UpperCamelCase :Any = sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , ):
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
__snake_case = [3, 34, 4, 12, 5, 2]
__snake_case = 9
__snake_case = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
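# For the inputs above this prints the two qualifying subsets: [3, 4, 2] [4, 5]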
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if n == 0:
return 0
UpperCamelCase :Union[str, Any] = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :str = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ ) )
return max_revenue
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase :Dict = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :Union[str, Any] = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
UpperCamelCase :str = max_revenue
return max_rev[n]
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
UpperCamelCase :List[str] = [float('''-inf''' ) for _ in range(n + 1 )]
UpperCamelCase :Dict = 0
for i in range(1 , n + 1 ):
UpperCamelCase :Optional[Any] = max_rev[i]
for j in range(1 , i + 1 ):
UpperCamelCase :Optional[Any] = max(SCREAMING_SNAKE_CASE__ , prices[j - 1] + max_rev[i - j] )
UpperCamelCase :Tuple = max_revenue_i
return max_rev[n]
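# A minimal sketch (not part of the original module) of how the bottom-up table
# can additionally record the first cut that achieves each optimum, so the
# actual piece lengths are recoverable. All names below are illustrative.
def bottom_up_cut_rod_with_solution(n: int, prices: list) -> tuple:
    max_rev = [0] * (n + 1)  # best revenue for each rod length
    first_cut = [0] * (n + 1)  # length of the first piece in an optimal cut
    for i in range(1, n + 1):
        best = float("-inf")
        for j in range(1, i + 1):
            if prices[j - 1] + max_rev[i - j] > best:
                best = prices[j - 1] + max_rev[i - j]
                first_cut[i] = j
        max_rev[i] = best
    pieces = []
    while n > 0:  # walk the recorded cuts back to recover the pieces
        pieces.append(first_cut[n])
        n -= first_cut[n]
    return max_rev[-1], pieces  # e.g. prices=[1, 5, 8, 9], n=4 -> (10, [2, 2])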
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
if n < 0:
UpperCamelCase :Any = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if n > len(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Union[str, Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE__ )}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
def _A ( ):
UpperCamelCase :Dict = [6, 10, 12, 15, 20, 23]
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase :str = 36
UpperCamelCase :int = top_down_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = bottom_up_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 658 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ):
UpperCamelCase :Optional[Any] = 0
if start < end:
UpperCamelCase :List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = a[end]
UpperCamelCase :Tuple = a[pivot]
UpperCamelCase :Any = temp
UpperCamelCase , UpperCamelCase :List[Any] = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
UpperCamelCase :List[str] = 0
UpperCamelCase :Optional[int] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[Any] = a[end]
UpperCamelCase :Optional[Any] = a[pivot]
UpperCamelCase :int = temp
UpperCamelCase :str = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase :Optional[int] = new_pivot_index + 1
UpperCamelCase :Optional[Any] = a[new_pivot_index]
UpperCamelCase :List[str] = a[index]
UpperCamelCase :Tuple = temp
UpperCamelCase :List[str] = a[new_pivot_index + 1]
UpperCamelCase :Tuple = a[end]
UpperCamelCase :str = temp
return new_pivot_index + 1, count
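# Rough sanity check (illustrative): randomized quicksort performs about
# 2 * n * ln(n) comparisons on average, so the n = 100 run below should report
# a count in the ballpark of 2 * 100 * ln(100) ~= 921.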
__snake_case = TemporaryFile()
__snake_case = 1_00 # 100 elements are to be sorted
__snake_case , __snake_case = 0, 1 # mean and standard deviation
__snake_case = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
__snake_case = np.load(outfile)
__snake_case = len(M) - 1
__snake_case = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class UpperCAmelCase_ ( lowercase, lowercase ):
"""simple docstring"""
UpperCamelCase_ : int ='focalnet'
def __init__( self , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=96 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=[192, 384, 768, 768] , SCREAMING_SNAKE_CASE_=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[3, 3, 3, 3] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=4.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1e-4 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = image_size
UpperCamelCase :Dict = patch_size
UpperCamelCase :Tuple = num_channels
UpperCamelCase :int = embed_dim
UpperCamelCase :Optional[Any] = use_conv_embed
UpperCamelCase :str = hidden_sizes
UpperCamelCase :str = depths
UpperCamelCase :Optional[int] = focal_levels
UpperCamelCase :Tuple = focal_windows
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :Optional[int] = mlp_ratio
UpperCamelCase :Optional[Any] = hidden_dropout_prob
UpperCamelCase :int = drop_path_rate
UpperCamelCase :Dict = use_layerscale
UpperCamelCase :List[str] = layerscale_value
UpperCamelCase :Tuple = use_post_layernorm
UpperCamelCase :int = use_post_layernorm_in_modulation
UpperCamelCase :str = normalize_modulator
UpperCamelCase :Any = initializer_range
UpperCamelCase :Optional[Any] = layer_norm_eps
UpperCamelCase :Dict = encoder_stride
UpperCamelCase :int = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
UpperCamelCase , UpperCamelCase :int = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
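# Illustrative usage (assumes the class above is exported as FocalNetConfig,
# as in the transformers library):
# config = FocalNetConfig(out_features=["stage1", "stage4"])
# config.stage_names  # -> ["stem", "stage1", "stage2", "stage3", "stage4"]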
| 658 | 1 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : str ):
return [ord(elem) - 96 for elem in SCREAMING_SNAKE_CASE__ ]
def _A ( SCREAMING_SNAKE_CASE__ : list[int] ):
return "".join(chr(elem + 96 ) for elem in encoded )
def _A ( ):
UpperCamelCase :Tuple = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , SCREAMING_SNAKE_CASE__ )
print('''Decoded:''' , decode(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
main()
| 658 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :Union[str, Any] = parent
UpperCamelCase :Tuple = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Any = patch_size
UpperCamelCase :List[str] = num_channels
UpperCamelCase :int = is_training
UpperCamelCase :str = use_labels
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :List[Any] = backbone_out_indices
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Tuple = intermediate_size
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :List[str] = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :int = backbone_featmap_shape
UpperCamelCase :Any = scope
UpperCamelCase :int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Dict = (image_size // patch_size) ** 2
UpperCamelCase :List[str] = num_patches + 1
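# e.g. with the defaults above (image_size=32, patch_size=16):
# (32 // 16) ** 2 = 4 patches, so seq_length = 5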
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :List[str] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :Optional[Any] = self.num_labels
UpperCamelCase :Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :Optional[int] = self.num_labels
UpperCamelCase :int = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Tuple =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Tuple =False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Union[str, Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
UpperCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Any = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = False
UpperCamelCase :List[Any] = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Any:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :List[str] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = prepare_img()
UpperCamelCase :List[str] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :int = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.txt"""}
__snake_case = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
__snake_case = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
__snake_case = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Dict =VOCAB_FILES_NAMES
UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : str =PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Dict =ConvBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
UpperCamelCase :List[Any] = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('''type''' ) )
UpperCamelCase :Any = do_lower_case
UpperCamelCase :List[str] = strip_accents
UpperCamelCase :List[str] = tokenize_chinese_chars
UpperCamelCase :Union[str, Any] = normalizer_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = do_lower_case
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
UpperCamelCase :List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase :Optional[Any] = [self.sep_token_id]
UpperCamelCase :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase :Optional[int] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
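# Illustrative usage (assumes the class above is exported as
# ConvBertTokenizerFast, as in the transformers library):
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# tokenizer("Hello world")["input_ids"]  # -> [CLS] ... [SEP] token ids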
| 658 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Union[str, Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
UpperCamelCase :Any = 128
elif "12-12" in model_name:
UpperCamelCase :Union[str, Any] = 12
UpperCamelCase :Any = 12
elif "14-14" in model_name:
UpperCamelCase :Optional[int] = 14
UpperCamelCase :List[str] = 14
elif "16-16" in model_name:
UpperCamelCase :List[Any] = 16
UpperCamelCase :Optional[Any] = 16
else:
raise ValueError('''Model not supported''' )
UpperCamelCase :Tuple = '''huggingface/label-files'''
if "speech-commands" in model_name:
UpperCamelCase :Optional[Any] = 35
UpperCamelCase :List[Any] = '''speech-commands-v2-id2label.json'''
else:
UpperCamelCase :Optional[int] = 527
UpperCamelCase :List[Any] = '''audioset-id2label.json'''
UpperCamelCase :Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase :List[str] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
UpperCamelCase :List[Any] = idalabel
UpperCamelCase :List[Any] = {v: k for k, v in idalabel.items()}
return config
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if "module.v" in name:
UpperCamelCase :Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
UpperCamelCase :int = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
UpperCamelCase :Tuple = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
UpperCamelCase :Optional[int] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase :str = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
UpperCamelCase :Any = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCamelCase :str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCamelCase :Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase :Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase :List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
UpperCamelCase :Union[str, Any] = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
UpperCamelCase :int = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
UpperCamelCase :Tuple = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
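# For example, the renames above map
# "module.v.blocks.0.attn.proj.weight" to
# "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight".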
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
for key in orig_state_dict.copy().keys():
UpperCamelCase :Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
UpperCamelCase :Any = key.split('''.''' )
UpperCamelCase :str = int(key_split[3] )
UpperCamelCase :Union[str, Any] = config.hidden_size
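# the fused qkv matrix stacks query, key and value as (3 * hidden_size, hidden_size),
# so the slices below peel them off in that order (and likewise for the fused bias)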
if "weight" in key:
UpperCamelCase :List[str] = val[:dim, :]
UpperCamelCase :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase :Optional[Any] = val[-dim:, :]
else:
UpperCamelCase :Dict = val[:dim]
UpperCamelCase :Optional[int] = val[dim : dim * 2]
UpperCamelCase :List[Any] = val[-dim:]
else:
UpperCamelCase :Union[str, Any] = val
return orig_state_dict
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[str] = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=False ):
UpperCamelCase :Optional[Any] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
UpperCamelCase :Optional[int] = model_name_to_url[model_name]
UpperCamelCase :Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE__ )
# rename some keys
UpperCamelCase :Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load 🤗 model
UpperCamelCase :int = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
UpperCamelCase :Union[str, Any] = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
UpperCamelCase :List[str] = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
UpperCamelCase :Optional[Any] = 1024 if '''speech-commands''' not in model_name else 128
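# the mean/std constants above are the dataset-level spectrogram normalization
# statistics from the original AST repository (AudioSet vs. Speech Commands v2)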
UpperCamelCase :int = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
if "speech-commands" in model_name:
UpperCamelCase :Dict = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
UpperCamelCase :List[Any] = dataset[0]['''audio''']['''array''']
else:
UpperCamelCase :List[Any] = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
UpperCamelCase , UpperCamelCase :Dict = torchaudio.load(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = waveform.squeeze().numpy()
UpperCamelCase :Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=16000 , return_tensors='''pt''' )
# forward pass
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCamelCase :Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCamelCase :Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCamelCase :str = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCamelCase :List[str] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCamelCase :Dict = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCamelCase :List[str] = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCamelCase :Optional[int] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCamelCase :List[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 658 | 1 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list ):
if not nums:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
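# e.g. the function above returns 2.0 for [1, 2, 3] and raises ValueError for [].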
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
UpperCamelCase :Optional[Any] = nn.Parameter(SCREAMING_SNAKE_CASE__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
UpperCamelCase :Dict = nn.Parameter(SCREAMING_SNAKE_CASE__ )
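# Illustrative sanity check (not part of the original script; the helper above
# is named set_param upstream): it copies an array into a torch layer, so e.g.
# set_param(nn.Linear(4, 4), torch.tensor(np.eye(4, dtype=np.float32)))
# succeeds, while any shape mismatch trips the assert.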
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
# set torch weights for 1-to-1 comparison
UpperCamelCase :Tuple = np.asarray(weights[0] )
UpperCamelCase :List[str] = np.asarray(weights[1] )
UpperCamelCase :Optional[Any] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1 , SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0 , 1 ) , )
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str ):
# set torch weights for 1-to-1 comparison
UpperCamelCase :List[Any] = np.asarray(weights[0] )
UpperCamelCase :List[str] = np.asarray(weights[1] )
UpperCamelCase :Optional[int] = np.asarray(weights[2] )
UpperCamelCase :int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1 , SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0 , 1 ) , )
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# layernorm 1
UpperCamelCase :Tuple = weights[0][0][0]
UpperCamelCase :Dict = np.asarray(layer_norm_a[0] )
UpperCamelCase :Optional[int] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) , )
# lsh weights + output
UpperCamelCase :Tuple = weights[0][1]
if len(SCREAMING_SNAKE_CASE__ ) < 4:
set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE__ , torch_block.attention , SCREAMING_SNAKE_CASE__ )
else:
set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE__ , torch_block.attention , SCREAMING_SNAKE_CASE__ )
# intermediate weights
UpperCamelCase :Union[str, Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(SCREAMING_SNAKE_CASE__ ) == 4:
UpperCamelCase :Tuple = intermediate_weights[2]
# layernorm 2
UpperCamelCase :int = np.asarray(intermediate_weights[0][0] )
UpperCamelCase :Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) , )
# intermediate dense
UpperCamelCase :str = np.asarray(intermediate_weights[1][0] )
UpperCamelCase :List[str] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE__ ) , )
# intermediate out
UpperCamelCase :Dict = np.asarray(intermediate_weights[4][0] )
UpperCamelCase :Union[str, Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE__ ) , )
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ):
# reformer model
UpperCamelCase :Optional[Any] = torch_model.reformer
# word embeds
UpperCamelCase :Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE__ ) , )
if isinstance(weights[3] , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCamelCase :List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
UpperCamelCase :Dict = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE__ ) )
UpperCamelCase :int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
SCREAMING_SNAKE_CASE__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCamelCase :Optional[Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# output layer norm
UpperCamelCase :Dict = np.asarray(weights[7][0] )
UpperCamelCase :Optional[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) , )
# output embeddings
UpperCamelCase :Optional[Any] = np.asarray(weights[9][0] )
UpperCamelCase :Optional[Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE__ ) , )
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str ):
# Initialise PyTorch model
UpperCamelCase :Optional[Any] = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase :int = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as f:
UpperCamelCase :str = pickle.load(SCREAMING_SNAKE_CASE__ )['''weights''']
set_model_weights_in_torch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 658 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :Tuple = 0.0
for i, j in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
n_correct += 1.0 if math_equivalence.is_equiv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else 0.0
UpperCamelCase :int = n_correct / len(SCREAMING_SNAKE_CASE_ )
return {
"accuracy": accuracy,
}
| 658 | 1 |
def _A ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
__snake_case = generate_large_matrix()
__snake_case = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
assert all(row == sorted(SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ ) for row in grid )
assert all(list(SCREAMING_SNAKE_CASE__ ) == sorted(SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ ) for col in zip(*SCREAMING_SNAKE_CASE__ ) )
def _A ( SCREAMING_SNAKE_CASE__ : list[int] ):
UpperCamelCase :Any = 0
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCamelCase :List[Any] = (left + right) // 2
UpperCamelCase :List[str] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCamelCase :Union[str, Any] = mid + 1
else:
UpperCamelCase :Tuple = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(SCREAMING_SNAKE_CASE__ )
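# Worked example (illustrative): for the row [4, 3, 2, -1] the bisection above
# returns 3, i.e. the index of the first negative value, which is also the
# count of non-negative values in the row.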
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
UpperCamelCase :Dict = 0
UpperCamelCase :Optional[Any] = len(grid[0] )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase :Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(SCREAMING_SNAKE_CASE__ ) * len(grid[0] )) - total
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
UpperCamelCase :Union[str, Any] = 0
for row in grid:
for i, number in enumerate(SCREAMING_SNAKE_CASE__ ):
if number < 0:
total += len(SCREAMING_SNAKE_CASE__ ) - i
break
return total
def _A ( ):
from timeit import timeit
print('''Running benchmarks''' )
UpperCamelCase :List[str] = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCamelCase :List[str] = timeit(F'''{func}(grid=grid)''' , setup=SCREAMING_SNAKE_CASE__ , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 658 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def _A ( ):
UpperCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase :Dict = parser.parse_args()
return args.f
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]="eval" ):
UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
raise ValueError(F'''can\'t find {path}''' )
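# Illustrative: for split="eval" this reads e.g. {output_dir}/eval_results.json,
# a flat mapping such as {"eval_accuracy": 0.75, "eval_loss": 0.58}.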
__snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_glue.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_clm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Dict = self.get_auto_remove_tmp_dir()
UpperCamelCase :Any = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_summarization_flax.main()
UpperCamelCase :str = get_results(SCREAMING_SNAKE_CASE_ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :List[str] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_mlm_flax.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :int = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_ta_mlm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
# With so little data, distributed training needs more epochs to reach the same score as a 0/1-GPU run.
UpperCamelCase :Tuple = 7 if get_gpu_count() > 1 else 2
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[int] = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_ner.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Dict = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_qa.main()
UpperCamelCase :int = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
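# Every test above follows the same launch pattern: build a CLI string,
# split it into argv, patch sys.argv, and call the example script's main().
# A minimal sketch of that pattern; run_example is an illustrative helper,
# not part of the test suite.
import sys
from unittest.mock import patch

def run_example(script_module, cli_args):
    argv = [script_module.__name__] + cli_args.split()
    with patch.object(sys, "argv", argv):
        script_module.main()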
| 658 | 1 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list ):
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return []
UpperCamelCase , UpperCamelCase :List[Any] = min(SCREAMING_SNAKE_CASE__ ), max(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[Any] = int(max_value - min_value ) + 1
UpperCamelCase :list[list] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )]
for i in my_list:
buckets[int(i - min_value )].append(SCREAMING_SNAKE_CASE__ )
return [v for bucket in buckets for v in sorted(SCREAMING_SNAKE_CASE__ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
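# De-obfuscated sketch of the function above: one bucket per integer offset
# from the minimum value, then each bucket is sorted and concatenated.
# Names are illustrative; the behavior matches bucket_sort above.
def bucket_sort_sketch(values):
    if not values:
        return []
    lo, hi = min(values), max(values)
    buckets = [[] for _ in range(int(hi - lo) + 1)]
    for v in values:
        buckets[int(v - lo)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]

assert bucket_sort_sketch([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]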
| 658 |
from __future__ import annotations
from collections.abc import Callable
def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ):
UpperCamelCase :Optional[Any] = x_start
UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = 0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCamelCase :Any = (x_end - x_start) / steps + xa
UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCamelCase :Optional[int] = xa
UpperCamelCase :List[str] = fxa
return area
if __name__ == "__main__":
def _A ( SCREAMING_SNAKE_CASE__ : int ):
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
__snake_case = 10
while i <= 10_00_00:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
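# Cleaned-up sketch of the routine above: each segment contributes
# |f(x1) + f(x2)| * (x2 - x1) / 2, and (x1, f(x1)) rolls forward each step.
# Names are illustrative; the arithmetic matches trapezoidal_area above.
def trapezoidal_area_sketch(fnc, x_start, x_end, steps=100):
    xa, fxa = x_start, fnc(x_start)
    area = 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        xa, fxa = xb, fxb
    return area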
| 658 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=[1, 16, 4, 4] , SCREAMING_SNAKE_CASE_=None , ) -> List[str]:
UpperCamelCase :List[Any] = parent
UpperCamelCase :Union[str, Any] = batch_size
UpperCamelCase :Union[str, Any] = image_size
UpperCamelCase :Dict = patch_size
UpperCamelCase :str = num_channels
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :Any = use_labels
UpperCamelCase :Any = hidden_size
UpperCamelCase :Dict = num_hidden_layers
UpperCamelCase :Union[str, Any] = num_attention_heads
UpperCamelCase :Dict = intermediate_size
UpperCamelCase :Tuple = hidden_act
UpperCamelCase :Optional[Any] = hidden_dropout_prob
UpperCamelCase :List[str] = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = type_sequence_label_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :Union[str, Any] = scope
UpperCamelCase :str = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCamelCase :Any = (self.image_size // 32) ** 2
UpperCamelCase :List[str] = num_patches + 1
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=SCREAMING_SNAKE_CASE_ , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Union[str, Any] = ViTHybridModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase :Any = self.type_sequence_label_size
UpperCamelCase :int = ViTHybridForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Optional[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[int] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = config_and_inputs
UpperCamelCase :Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] =(
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Dict =False
UpperCamelCase_ : Optional[int] =False
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :List[str] = ViTHybridModelTester(self )
UpperCamelCase :Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :List[str] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase :str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :List[str] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCamelCase :Tuple = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :List[str] = ViTHybridModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Optional[int]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.default_image_processor
UpperCamelCase :str = prepare_img()
UpperCamelCase :int = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase :Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@slow
@require_accelerate
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCamelCase :Optional[Any] = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
UpperCamelCase :Union[str, Any] = prepare_img()
UpperCamelCase :Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCamelCase :List[Any] = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
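# The sequence length in the tester above follows from the backbone's output
# stride: with a stride of 32, a 64x64 input yields a 2x2 feature map, i.e.
# (64 // 32) ** 2 = 4 patches, plus one [CLS] token. The arithmetic:
image_size = 64
num_patches = (image_size // 32) ** 2  # 4 patches from the backbone feature map
seq_length = num_patches + 1  # 5, counting the [CLS] token
assert seq_length == 5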
| 658 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(CMStochasticIterativeScheduler,)
UpperCamelCase_ : Any =10
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = 10
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Dict = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps[0]
UpperCamelCase :Union[str, Any] = scheduler.timesteps[1]
UpperCamelCase :str = self.dummy_sample
UpperCamelCase :List[str] = 0.1 * sample
UpperCamelCase :List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :str = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ) -> List[str]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[Any] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps
UpperCamelCase :Union[str, Any] = torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = self.dummy_model()
UpperCamelCase :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase :List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = pred_prev_sample
UpperCamelCase :Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Dict = self.scheduler_classes[0]
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = scheduler.timesteps
UpperCamelCase :int = torch.manual_seed(0 )
UpperCamelCase :str = self.dummy_model()
UpperCamelCase :List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase :List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :int = pred_prev_sample
UpperCamelCase :Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :Tuple = self.get_scheduler_config()
UpperCamelCase :List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = [39, 30, 12, 1, 0]
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[int] = self.scheduler_classes[0]
UpperCamelCase :List[str] = self.get_scheduler_config()
UpperCamelCase :Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
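# Minimal sketch of the sampling loop these tests exercise: scale the model
# input, predict the noise residual, then step from x_t to x_{t-1}. The
# "model" below is a stand-in identity function rather than a trained
# network, and the tensor shape is illustrative.
import torch
from diffusers import CMStochasticIterativeScheduler

sketch_scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
sketch_scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * sketch_scheduler.init_noise_sigma
for t in sketch_scheduler.timesteps:
    scaled = sketch_scheduler.scale_model_input(sample, t)  # 1. scale model input
    residual = scaled  # 2. stand-in for model(scaled, t)
    sample = sketch_scheduler.step(residual, t, sample, generator=generator).prev_sample  # 3. x_t -> x_{t-1}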
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LayoutLMv3FeatureExtractor"""]
__snake_case = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
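# Both this __init__ and the similar ones elsewhere in the library follow one
# recipe: declare _import_structure up front, repeat the real imports under
# TYPE_CHECKING for static analyzers, and otherwise replace the module with a
# _LazyModule that resolves attributes on first access. A stripped-down
# sketch of that idea (illustrative, not the actual transformers
# implementation):
import importlib
import types

class SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        return getattr(module, attr)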
| 658 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :int = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained('''gpt2''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[str] = GenerationConfig()
UpperCamelCase :List[str] = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
UpperCamelCase :Dict = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = generation_config.update(**SCREAMING_SNAKE_CASE_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = GenerationConfig()
UpperCamelCase :Tuple = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Custom attributes survive the save/load round trip
self.assertEqual(new_config.foo , '''bar''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(default_config.num_beams , 1 )
UpperCamelCase :Tuple = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> Optional[Any]:
UpperCamelCase :List[str] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCAmelCase ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
UpperCamelCase :List[Any] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
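# The save/load round trip the first test above checks, in plain form. A
# hedged sketch: "generation_sketch_dir" is any writable directory name;
# save_pretrained creates it and writes generation_config.json.
from transformers import GenerationConfig

sketch_config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
sketch_config.save_pretrained("generation_sketch_dir")
reloaded = GenerationConfig.from_pretrained("generation_sketch_dir")
assert reloaded.temperature == 0.7
assert reloaded.top_k == 50  # unspecified fields keep their defaults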
| 658 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return None
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return None
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any =[
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> int:
from transformers import BertModel
UpperCamelCase :int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
vocab_file.flush()
UpperCamelCase :Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase :Union[str, Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ )
@require_tf
@slow
def UpperCAmelCase ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :Tuple = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = quantize(Path(SCREAMING_SNAKE_CASE_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :str = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = quantize(SCREAMING_SNAKE_CASE_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase :Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[str]:
from transformers import BertModel
UpperCamelCase :List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[Any]:
from transformers import TFBertModel
UpperCamelCase :Optional[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Tuple = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should keep exactly one arg (everything before the first arg not provided, "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :str = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
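# Typical use of the helpers under test, as a hedged sketch. The positional
# order mirrors the convert(...) call in _test_export above (framework,
# model, output path, opset, tokenizer); the output directory should be
# fresh, since convert() refuses to write into a non-empty folder.
from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize

sketch_onnx_path = Path("onnx_sketch/model.onnx")
convert("pt", "bert-base-cased", sketch_onnx_path, 12, "bert-base-cased")
quantized_path = quantize(sketch_onnx_path)  # writes model-quantized.onnx alongside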
| 658 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> None:
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
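# The class above is the standard deprecation shim: the old name stays
# importable, warns on construction, and otherwise behaves exactly like its
# replacement. The same pattern in a self-contained form; NewProcessor and
# OldProcessor are illustrative names, not library classes.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)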
| 658 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Union[str, Any] = tempfile.mkdtemp()
UpperCamelCase :List[str] = 5
# Realm tokenizer vocabulary
UpperCamelCase :List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase :Dict = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase :Any = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Tuple = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=SCREAMING_SNAKE_CASE_ , )
return block_records
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[Any] = self.get_config()
UpperCamelCase :str = self.get_dummy_retriever()
UpperCamelCase :int = retriever.tokenizer
UpperCamelCase :Optional[Any] = np.array([0, 3] , dtype='''long''' )
UpperCamelCase :Optional[Any] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Tuple = tokenizer(
['''the fourth'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Optional[Any] = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = self.get_config()
UpperCamelCase :Union[str, Any] = self.get_dummy_retriever()
UpperCamelCase :Dict = retriever.tokenizer
UpperCamelCase :str = np.array([0, 3, 5] , dtype='''long''' )
UpperCamelCase :List[str] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Optional[Any] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Any = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :str = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
UpperCamelCase :List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
UpperCamelCase :Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCamelCase :List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 658 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
UpperCamelCase :Any = self.transformer_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE_ , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :int = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
UpperCamelCase :Any = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
UpperCamelCase :Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
UpperCamelCase :int = black.format_str(SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , newline='''\n''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f:
self.assertTrue(f.read() , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Union[str, Any] = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , SCREAMING_SNAKE_CASE_ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , SCREAMING_SNAKE_CASE_ ) , )
# Copy consistency with a really long name
UpperCamelCase :List[Any] = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('''Bert''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , SCREAMING_SNAKE_CASE_ , overwrite_result=re.sub('''Bert''' , '''TestModel''' , SCREAMING_SNAKE_CASE_ ) , )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Dict = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
UpperCamelCase :List[str] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
UpperCamelCase :Union[str, Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCamelCase :str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
UpperCamelCase , UpperCamelCase :List[Any] = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , localized_readme['''format_model_list'''] )
self.assertFalse(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase :Optional[Any] = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , localized_readme['''format_model_list'''] )
# Check that the number of models matches the original README.md after conversion.
self.assertTrue(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
UpperCamelCase :Union[str, Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCamelCase :str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
UpperCamelCase , UpperCamelCase :Optional[int] = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , localized_readme['''format_model_list'''] )
# Check whether the model link in the localized README is synchronized with the main README.
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 658 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]:
UpperCamelCase :int = parent
UpperCamelCase :List[Any] = batch_size
UpperCamelCase :List[Any] = patch_size
UpperCamelCase :Optional[int] = max_length
UpperCamelCase :Union[str, Any] = num_mel_bins
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :Dict = use_labels
UpperCamelCase :Dict = hidden_size
UpperCamelCase :Optional[int] = num_hidden_layers
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Optional[int] = intermediate_size
UpperCamelCase :List[str] = hidden_act
UpperCamelCase :List[str] = hidden_dropout_prob
UpperCamelCase :List[Any] = attention_probs_dropout_prob
UpperCamelCase :str = type_sequence_label_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :Union[str, Any] = scope
UpperCamelCase :List[Any] = frequency_stride
UpperCamelCase :Tuple = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension
UpperCamelCase :Optional[int] = num_patches + 2
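# Worked example: with the defaults above (num_mel_bins=16, max_length=24,
# patch_size=2, both strides 2), frequency_out_dimension = (16 - 2) // 2 + 1 = 8
# and time_out_dimension = (24 - 2) // 2 + 1 = 12, giving 8 * 12 = 96 patches
# and a sequence length of 98.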
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :str = self.get_config()
return config, input_values, labels
def UpperCAmelCase ( self ) -> List[Any]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Any =(
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Dict =False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = ASTModelTester(self )
UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Any = [*signature.parameters.keys()]
UpperCamelCase :Optional[int] = ['''input_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Any = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Tuple:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = self.default_feature_extractor
UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = self.default_feature_extractor
UpperCamelCase , UpperCamelCase :Dict = prepare_audio()
UpperCamelCase :Dict = audio.squeeze().numpy()
UpperCamelCase :int = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase :List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :Union[str, Any] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
UpperCamelCase :str = hex_num[0] == '''-'''
if is_negative:
UpperCamelCase :Union[str, Any] = hex_num[1:]
try:
UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE__ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
UpperCamelCase :Dict = ''''''
while int_num > 0:
UpperCamelCase :Tuple = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
UpperCamelCase :Dict = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase :Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
UpperCamelCase :int = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_55.0
UpperCamelCase :List[str] = image.transpose(0 , 3 , 1 , 2 )
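# scale pixel values from [0, 1] to [-1, 1], the input range the VAE encoder expects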
UpperCamelCase :Tuple = 2.0 * image - 1.0
UpperCamelCase :Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
return image
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0.99_95 ):
if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
UpperCamelCase :int = True
UpperCamelCase :Dict = va.device
UpperCamelCase :List[Any] = va.cpu().numpy()
UpperCamelCase :str = va.cpu().numpy()
UpperCamelCase :Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) )
if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD:
UpperCamelCase :Any = (1 - t) * va + t * va
else:
UpperCamelCase :Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = theta_a * t
UpperCamelCase :str = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCamelCase :List[Any] = sin_theta_t / sin_theta_a
UpperCamelCase :Union[str, Any] = sa * va + sa * va
if inputs_are_torch:
UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return va
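# --- Illustrative sketch, added for clarity: `_slerp_sketch` is a hypothetical ---
# --- name and not part of this pipeline's API. ---
# The helper above implements spherical linear interpolation (slerp): it blends two
# vectors along the arc of the sphere rather than the straight chord, keeping the
# interpolant's norm roughly constant. A minimal NumPy version of the same idea,
# reusing the module-level `np` import and falling back to plain lerp for
# near-parallel inputs, could look like this:
def _slerp_sketch(t, v0, v1, dot_threshold=0.9995):
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        # nearly parallel vectors: plain lerp avoids dividing by sin(0)
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)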
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
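# Note: for unit vectors x and y, |x - y| is the chord length c and the geodesic
# angle between them is theta = 2 * arcsin(c / 2), so the expression above returns
# 2 * (arcsin(c / 2)) ** 2 = theta ** 2 / 2, i.e. half the squared spherical distance.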
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ):
for param in model.parameters():
UpperCamelCase :Any = value
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> str:
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , clip_model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , coca_model=SCREAMING_SNAKE_CASE_ , coca_tokenizer=SCREAMING_SNAKE_CASE_ , coca_transform=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Union[str, Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE_ )
else feature_extractor.size['''shortest_edge''']
)
UpperCamelCase :Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE_ )
set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase :Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
# get the original timestep using init_timestep
UpperCamelCase :Union[str, Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = max(num_inference_steps - init_timestep , 0 )
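# the timesteps array runs from noisiest to cleanest, so slicing from t_start skips
# the earliest (noisiest) steps: strength=1.0 keeps the full schedule, while smaller
# values start later and preserve more of the input image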
UpperCamelCase :Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
UpperCamelCase :Tuple = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ )
]
UpperCamelCase :List[str] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
else:
UpperCamelCase :Any = self.vae.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[str] = 0.1_8215 * init_latents
UpperCamelCase :Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase :List[Any] = randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
UpperCamelCase :Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = init_latents
return latents
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase :Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase :List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase :str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase :int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
UpperCamelCase :List[str] = latents.detach().requires_grad_()
UpperCamelCase :List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase :Optional[int] = 1 - alpha_prod_t
# compute the predicted original sample from the predicted noise, also called
# "predicted x_0" in formula (12) of https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
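# heuristic blend used for CLIP guidance: fac = sqrt(beta_prod_t) weights the
# predicted x_0 more heavily at noisy timesteps, so CLIP scores a cleaner image
# estimate instead of the raw noisy latents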
UpperCamelCase :int = torch.sqrt(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :str = self.scheduler.sigmas[index]
UpperCamelCase :Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :int = 1 / 0.1_8215 * sample
UpperCamelCase :List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :List[str] = transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = self.normalize(SCREAMING_SNAKE_CASE_ ).to(latents.dtype )
UpperCamelCase :List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() * clip_guidance_scale
UpperCamelCase :Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = latents.detach() + grads * (sigma**2)
UpperCamelCase :Optional[Any] = noise_pred_original
else:
UpperCamelCase :List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.6 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0.8 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Generator ) and batch_size > 1:
UpperCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1)
UpperCamelCase :Tuple = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCamelCase :Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase :Dict = ''', '''.join(SCREAMING_SNAKE_CASE_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :Any = self.get_image_description(SCREAMING_SNAKE_CASE_ )
if style_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :str = self.get_image_description(SCREAMING_SNAKE_CASE_ )
# get prompt text embeddings for content and style
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :Dict = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase :Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# set timesteps
UpperCamelCase :str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase :List[str] = {}
if accepts_offset:
UpperCamelCase :Tuple = 1
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCamelCase , UpperCamelCase :Tuple = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
UpperCamelCase :Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
# Preprocess image
UpperCamelCase :Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if clip_guidance_scale > 0:
UpperCamelCase :Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = slerp(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase :Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase :Any = content_text_input.input_ids.shape[-1]
UpperCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCamelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase :Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase :str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated on the target device
# for 1-to-1 reproducibility of results with the CompVis implementation.
# However, this currently doesn't work on `mps`.
UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase :int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase :int = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase :str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase :Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler and is ignored by other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase :Dict = {}
if accepts_eta:
UpperCamelCase :int = eta
# check if the scheduler accepts generator
UpperCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase :List[str] = generator
with self.progress_bar(total=SCREAMING_SNAKE_CASE_ ):
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase :Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase :Any = noise_pred.chunk(2 )
UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase :int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase , UpperCamelCase :str = self.cond_fn(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase :List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[Any] = 1 / 0.1_8215 * latents
UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase :List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 658 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__snake_case = False
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = '''A painting of a squirrel eating a burger '''
UpperCamelCase :Tuple = torch.manual_seed(0 )
UpperCamelCase :Dict = pipe(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = VersatileDiffusionTextToImagePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = generator.manual_seed(0 )
UpperCamelCase :Tuple = pipe(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = '''A painting of a squirrel eating a burger '''
UpperCamelCase :List[Any] = torch.manual_seed(0 )
UpperCamelCase :Dict = pipe(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCamelCase :Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 658 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :list[list[int]] = []
UpperCamelCase :list[int] = []
UpperCamelCase :List[str] = 0
UpperCamelCase :Any = sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , ):
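# prune this branch early: stop when the running sum of the current path already
# exceeds the target, or when even adding every remaining number could no longer
# reach it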
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
__snake_case = [3, 34, 4, 12, 5, 2]
__snake_case = 9
__snake_case = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 658 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , ) -> Optional[int]:
super().__init__()
self.register_modules(transformer=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
# create an imagenet -> id dictionary for easier use
UpperCamelCase :Dict = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = dict(sorted(self.labels.items() ) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[int]:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Optional[int] = list(SCREAMING_SNAKE_CASE_ )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 4.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCamelCase :List[Any] = len(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = self.transformer.config.sample_size
UpperCamelCase :Any = self.transformer.config.in_channels
UpperCamelCase :int = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.transformer.dtype , )
UpperCamelCase :Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
UpperCamelCase :str = torch.tensor(SCREAMING_SNAKE_CASE_ , device=self.device ).reshape(-1 )
UpperCamelCase :Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
UpperCamelCase :Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
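# classifier-free guidance for DiT runs the class-conditional and null-class
# branches on identical latents; re-duplicating the first half keeps the two
# branches from drifting apart between denoising steps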
UpperCamelCase :int = latent_model_input[: len(SCREAMING_SNAKE_CASE_ ) // 2]
UpperCamelCase :List[Any] = torch.cat([half, half] , dim=0 )
UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = t
if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
UpperCamelCase :Tuple = latent_model_input.device.type == '''mps'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :List[str] = torch.floataa if is_mps else torch.floataa
else:
UpperCamelCase :Union[str, Any] = torch.intaa if is_mps else torch.intaa
UpperCamelCase :Union[str, Any] = torch.tensor([timesteps] , dtype=SCREAMING_SNAKE_CASE_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
UpperCamelCase :Dict = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase :Any = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
UpperCamelCase :Tuple = self.transformer(
SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if guidance_scale > 1:
UpperCamelCase , UpperCamelCase :Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
UpperCamelCase , UpperCamelCase :List[Any] = torch.split(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) // 2 , dim=0 )
UpperCamelCase :str = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
UpperCamelCase :Optional[Any] = torch.cat([half_eps, half_eps] , dim=0 )
UpperCamelCase :List[str] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
UpperCamelCase , UpperCamelCase :Union[str, Any] = torch.split(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dim=1 )
else:
UpperCamelCase :Optional[Any] = noise_pred
# compute previous image: x_t -> x_t-1
UpperCamelCase :str = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
if guidance_scale > 1:
UpperCamelCase , UpperCamelCase :int = latent_model_input.chunk(2 , dim=0 )
else:
UpperCamelCase :int = latent_model_input
UpperCamelCase :Optional[Any] = 1 / self.vae.config.scaling_factor * latents
UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :Tuple = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase :Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase :Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
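# A number is "bouncy" when its digits are neither entirely non-decreasing nor
# entirely non-increasing, e.g. 155349 is bouncy, while 134468 (increasing) and
# 66420 (decreasing) are not (Project Euler problem 112).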
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
UpperCamelCase :int = str(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = ''''''.join(sorted(SCREAMING_SNAKE_CASE__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _A ( SCREAMING_SNAKE_CASE__ : float = 99 ):
if not 0 < percent < 100:
raise ValueError('''solution() only accepts values from 0 to 100''' )
UpperCamelCase :Tuple = 0
UpperCamelCase :str = 1
while True:
if check_bouncy(SCREAMING_SNAKE_CASE__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 658 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__snake_case = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
__snake_case = {
"""squeezebert/squeezebert-uncased""": 5_12,
"""squeezebert/squeezebert-mnli""": 5_12,
"""squeezebert/squeezebert-mnli-headless""": 5_12,
}
__snake_case = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : str =VOCAB_FILES_NAMES
UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] =PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Dict =SqueezeBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
UpperCamelCase :Tuple = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('''type''' ) )
UpperCamelCase :Any = do_lower_case
UpperCamelCase :Tuple = strip_accents
UpperCamelCase :int = tokenize_chinese_chars
UpperCamelCase :Tuple = normalizer_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = do_lower_case
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
UpperCamelCase :str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
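# Segment (token type) IDs follow the BERT convention: a single sequence
# `[CLS] A [SEP]` is all 0s, and for a pair `[CLS] A [SEP] B [SEP]` the second
# sequence, including its trailing `[SEP]`, is marked with 1s.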
UpperCamelCase :List[Any] = [self.sep_token_id]
UpperCamelCase :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase :Union[str, Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :Union[str, Any] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
UpperCamelCase :str = hex_num[0] == '''-'''
if is_negative:
UpperCamelCase :Union[str, Any] = hex_num[1:]
try:
UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE__ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
UpperCamelCase :Dict = ''''''
while int_num > 0:
UpperCamelCase :Tuple = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if n == 0:
return 0
UpperCamelCase :Union[str, Any] = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :str = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ ) )
return max_revenue
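# Note: this naive recursion re-solves the same subproblems repeatedly and runs in
# O(2**n) time; the memoized top-down and bottom-up variants below bring this down
# to O(n**2).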
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase :Dict = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :Union[str, Any] = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
UpperCamelCase :str = max_revenue
return max_rev[n]
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
UpperCamelCase :List[str] = [float('''-inf''' ) for _ in range(n + 1 )]
UpperCamelCase :Dict = 0
for i in range(1 , n + 1 ):
UpperCamelCase :Optional[Any] = max_rev[i]
for j in range(1 , i + 1 ):
UpperCamelCase :Optional[Any] = max(SCREAMING_SNAKE_CASE__ , prices[j - 1] + max_rev[i - j] )
UpperCamelCase :Tuple = max_revenue_i
return max_rev[n]
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
if n < 0:
UpperCamelCase :Any = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if n > len(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Union[str, Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE__ )}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
def _A ( ):
UpperCamelCase :Dict = [6, 10, 12, 15, 20, 23]
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase :str = 36
UpperCamelCase :int = top_down_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = bottom_up_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 658 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase , UpperCamelCase :List[Any] = position
UpperCamelCase :Any = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCamelCase :Dict = []
for position in positions:
UpperCamelCase , UpperCamelCase :str = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE__ )
return permissible_positions
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
if is_complete(SCREAMING_SNAKE_CASE__ ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase , UpperCamelCase :Optional[int] = position
if board[y][x] == 0:
UpperCamelCase :Any = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , curr + 1 ):
return True
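# backtrack: no continuation completed the tour, so reset this square to unvisited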
UpperCamelCase :Union[str, Any] = 0
return False
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[Any] = [[0 for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )]
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Tuple = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , (i, j) , 1 ):
return board
UpperCamelCase :str = 0
UpperCamelCase :List[Any] = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__snake_case = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : str ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
return max(metric_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for gt in ground_truths )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Union[str, Any] = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
UpperCamelCase :List[str] = []
if args.gold_data_mode == "qa":
UpperCamelCase :Any = pd.read_csv(SCREAMING_SNAKE_CASE__ , sep='''\t''' , header=SCREAMING_SNAKE_CASE__ )
for answer_list in data[1]:
UpperCamelCase :Tuple = ast.literal_eval(SCREAMING_SNAKE_CASE__ )
answers.append(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :Optional[int] = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
UpperCamelCase :Any = [[reference] for reference in references]
UpperCamelCase :Optional[int] = 0
for prediction, ground_truths in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
total += 1
em += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
fa += metric_max_over_ground_truths(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = 1_00.0 * em / total
UpperCamelCase :Dict = 1_00.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
UpperCamelCase :List[Any] = args.k
UpperCamelCase :int = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
UpperCamelCase :Dict = [line.strip() for line in open(SCREAMING_SNAKE_CASE__ , '''r''' ).readlines()]
UpperCamelCase :Optional[Any] = 0
for hypo, reference in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Union[str, Any] = set(hypo.split('''\t''' )[:k] )
UpperCamelCase :Optional[int] = set(reference.split('''\t''' ) )
total += 1
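# precision@k: the fraction of the top-k retrieved document titles that also appear
# in the gold provenance for this question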
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase :Tuple = 1_00.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
def strip_title(SCREAMING_SNAKE_CASE__ : List[Any] ):
if title.startswith('''"''' ):
UpperCamelCase :Optional[int] = title[1:]
if title.endswith('''"''' ):
UpperCamelCase :Union[str, Any] = title[:-1]
return title
UpperCamelCase :int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )['''input_ids'''].to(args.device )
UpperCamelCase :Tuple = rag_model.rag.question_encoder(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = question_enc_outputs[0]
UpperCamelCase :List[Any] = rag_model.retriever(
SCREAMING_SNAKE_CASE__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
UpperCamelCase :int = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase :List[str] = []
for docs in all_docs:
UpperCamelCase :str = [strip_title(SCREAMING_SNAKE_CASE__ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(SCREAMING_SNAKE_CASE__ ) )
return provenance_strings
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
with torch.no_grad():
UpperCamelCase :Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = inputs_dict.input_ids.to(args.device )
UpperCamelCase :Optional[Any] = inputs_dict.attention_mask.to(args.device )
UpperCamelCase :List[Any] = rag_model.generate( # rag_model overwrites generate
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=SCREAMING_SNAKE_CASE__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase :Optional[int] = rag_model.retriever.generator_tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
if args.print_predictions:
for q, a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.info('''Q: {} - A: {}'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
return answers
def _A ( ):
UpperCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=SCREAMING_SNAKE_CASE__ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=SCREAMING_SNAKE_CASE__ , choices=['''exact''', '''compressed''', '''legacy'''] , type=SCREAMING_SNAKE_CASE__ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=SCREAMING_SNAKE_CASE__ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=SCREAMING_SNAKE_CASE__ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=SCREAMING_SNAKE_CASE__ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=SCREAMING_SNAKE_CASE__ , choices=['''qa''', '''ans'''] , help=(
        '''Format of the gold data file. '''
        '''qa - a single line in the following format: question [tab] answer_list. '''
        '''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=SCREAMING_SNAKE_CASE__ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=SCREAMING_SNAKE_CASE__ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=SCREAMING_SNAKE_CASE__ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=SCREAMING_SNAKE_CASE__ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=SCREAMING_SNAKE_CASE__ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints the docs retrieved while generating.''' , )
UpperCamelCase :Optional[Any] = parser.parse_args()
UpperCamelCase :int = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def _A ( SCREAMING_SNAKE_CASE__ : Any ):
UpperCamelCase :List[str] = {}
if args.model_type is None:
UpperCamelCase :List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
UpperCamelCase :Optional[int] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
UpperCamelCase :Any = args.n_docs
if args.index_name is not None:
UpperCamelCase :List[str] = args.index_name
if args.index_path is not None:
UpperCamelCase :Optional[int] = args.index_path
else:
UpperCamelCase :Union[str, Any] = BartForConditionalGeneration
UpperCamelCase :int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
UpperCamelCase :Tuple = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(SCREAMING_SNAKE_CASE__ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(SCREAMING_SNAKE_CASE__ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
UpperCamelCase :int = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Any = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , retriever=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
model.retriever.init_retrieval()
else:
UpperCamelCase :Union[str, Any] = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
UpperCamelCase :Dict = []
for line in tqdm(SCREAMING_SNAKE_CASE__ ):
questions.append(line.strip() )
if len(SCREAMING_SNAKE_CASE__ ) == args.eval_batch_size:
UpperCamelCase :Dict = evaluate_batch_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
preds_file.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
preds_file.flush()
UpperCamelCase :Optional[int] = []
if len(SCREAMING_SNAKE_CASE__ ) > 0:
UpperCamelCase :Any = evaluate_batch_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
preds_file.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) )
preds_file.flush()
score_fn(SCREAMING_SNAKE_CASE__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__snake_case = get_args()
main(args)
| 658 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :int = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained('''gpt2''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[str] = GenerationConfig()
UpperCamelCase :List[str] = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
UpperCamelCase :Dict = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = generation_config.update(**SCREAMING_SNAKE_CASE_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} )
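        # Behaviour sketch of the assertions above: GenerationConfig().update(
        # max_new_tokens=1024, foo="bar") sets max_new_tokens in place and
        # returns {"foo": "bar"} -- unrecognised keys are handed back untouched.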
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = GenerationConfig()
UpperCamelCase :Tuple = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(default_config.num_beams , 1 )
UpperCamelCase :Tuple = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> Optional[Any]:
UpperCamelCase :List[str] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCAmelCase ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
UpperCamelCase :List[Any] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCamelCase :List[Any] = _modexpt(SCREAMING_SNAKE_CASE__ , exponent // 2 , SCREAMING_SNAKE_CASE__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(SCREAMING_SNAKE_CASE__ , exponent - 1 , SCREAMING_SNAKE_CASE__ )) % modulo_value
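# Quick check of the helper above: _modexpt(3, 4, 5) == 81 % 5 == 1, and
# _modexpt(2, 10, 1000) == 24 (the last three digits of 1024).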
def _A ( SCREAMING_SNAKE_CASE__ : int = 1777 , SCREAMING_SNAKE_CASE__ : int = 1855 , SCREAMING_SNAKE_CASE__ : int = 8 ):
UpperCamelCase :Union[str, Any] = base
for _ in range(1 , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Optional[Any] = _modexpt(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 10**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
if length <= 0 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(SCREAMING_SNAKE_CASE__ )]
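# Example: with length=5 the comprehension above yields [0, 1, 6, 15, 28],
# i.e. n * (2n - 1) for n = 0..4 (the sequence starts at 0 because range(0, 5)
# starts at 0).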
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 1 |
from math import ceil
def _A ( SCREAMING_SNAKE_CASE__ : int = 1001 ):
UpperCamelCase :Optional[int] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
UpperCamelCase :Any = 2 * i + 1
UpperCamelCase :Any = 2 * i
UpperCamelCase :Tuple = total + 4 * odd**2 - 6 * even
return total
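# Sanity check for the loop above: a 5x5 number spiral has diagonal values
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101 -- and with n = 5 the loop
# runs i = 1, 2, giving 1 + 24 + 76 = 101.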
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__snake_case = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 658 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__snake_case = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] ='facebook/nllb-200-distilled-600M'
UpperCamelCase_ : Optional[Any] =(
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
UpperCamelCase_ : Dict ='translator'
UpperCamelCase_ : Any =AutoTokenizer
UpperCamelCase_ : Optional[Any] =AutoModelForSeqaSeqLM
UpperCamelCase_ : List[Any] =LANGUAGE_CODES
UpperCamelCase_ : int =['text', 'text', 'text']
UpperCamelCase_ : Union[str, Any] =['text']
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
UpperCamelCase :Optional[int] = self.lang_to_code[src_lang]
UpperCamelCase :Union[str, Any] = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return self.model.generate(**SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
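# Hypothetical usage sketch (the tool name is assumed; instantiation happens
# through the surrounding Tool API, not shown here). Calling the tool chains
# the three hooks above:
#   tool = TranslationTool()
#   tool("Hello", src_lang="English", tgt_lang="French")
#   # encode() builds the tokenizer inputs, forward() runs generate(), and
#   # decode() strips special tokens from the first generated sequence.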
| 658 | 1 |
import argparse
from collections import defaultdict
import yaml
__snake_case = """docs/source/en/_toctree.yml"""
def _A ( SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :str = defaultdict(SCREAMING_SNAKE_CASE__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCamelCase :Optional[int] = [key for key, value in counts.items() if value > 1]
UpperCamelCase :int = []
for duplicate_key in duplicates:
UpperCamelCase :Optional[int] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
    return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : SCREAMING_SNAKE_CASE__["title"].lower() )
def _A ( SCREAMING_SNAKE_CASE__ : str=False ):
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as f:
UpperCamelCase :str = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase :List[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase :Dict = content[api_idx]['''sections''']
# Then to the model doc
UpperCamelCase :List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCamelCase :List[str] = api_doc[model_idx]['''sections''']
UpperCamelCase :Dict = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE__ ) if '''sections''' in section]
UpperCamelCase :List[Any] = False
for idx, modality_doc in modalities_docs:
UpperCamelCase :Any = modality_doc['''sections''']
UpperCamelCase :int = clean_model_doc_toc(SCREAMING_SNAKE_CASE__ )
if old_modality_doc != new_modality_doc:
UpperCamelCase :List[str] = True
if overwrite:
UpperCamelCase :List[Any] = new_modality_doc
if diff:
if overwrite:
UpperCamelCase :Any = model_doc
UpperCamelCase :Any = api_doc
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE__ , allow_unicode=SCREAMING_SNAKE_CASE__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__snake_case = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
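# Illustrative fragment of the YAML shape this checker walks (structure
# assumed from the traversal above; the real file is docs/source/en/_toctree.yml):
#   - title: API
#     sections:
#       - title: Models
#         sections:
#           - title: Text models
#             sections:
#               - local: model_doc/albert
#                 title: ALBERT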
| 658 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__snake_case = 10
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
for i in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if array[i] == target:
return i
return -1
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Tuple = 0
UpperCamelCase :Dict = len(SCREAMING_SNAKE_CASE__ )
while left <= right:
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = (left + right) // 3 + 1
UpperCamelCase :str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
UpperCamelCase :int = one_third - 1
elif array[two_third] < target:
UpperCamelCase :Any = two_third + 1
else:
UpperCamelCase :Any = one_third + 1
UpperCamelCase :int = two_third - 1
else:
return -1
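# Note: both the iterative version above and the recursive version below fall
# back to lin_search once the window is narrower than the module-level
# precision (10), so small ranges are always resolved by the linear scan.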
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
if left < right:
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = (left + right) // 3 + 1
UpperCamelCase :Tuple = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(SCREAMING_SNAKE_CASE__ , one_third - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input("""Enter numbers separated by comma:\n""").strip()
__snake_case = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__snake_case = int(input("""Enter the number to be found in the list:\n""").strip())
__snake_case = ite_ternary_search(collection, target)
__snake_case = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 658 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] ='bloom'
UpperCamelCase_ : List[str] =['past_key_values']
UpperCamelCase_ : int ={
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self , SCREAMING_SNAKE_CASE_=25_0880 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
UpperCamelCase :List[Any] = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase :Tuple = kwargs.pop('''n_embed''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = hidden_size if n_embed is None else n_embed
UpperCamelCase :str = n_layer
UpperCamelCase :List[Any] = n_head
UpperCamelCase :Dict = layer_norm_epsilon
UpperCamelCase :Optional[int] = initializer_range
UpperCamelCase :str = use_cache
UpperCamelCase :Optional[Any] = pretraining_tp
UpperCamelCase :str = apply_residual_connection_post_layernorm
UpperCamelCase :List[str] = hidden_dropout
UpperCamelCase :List[Any] = attention_dropout
UpperCamelCase :int = bos_token_id
UpperCamelCase :List[str] = eos_token_id
UpperCamelCase :Union[str, Any] = slow_but_exact
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] =version.parse('1.12' )
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "default" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , ) -> int:
super().__init__(SCREAMING_SNAKE_CASE_ , task=SCREAMING_SNAKE_CASE_ , patching_specs=SCREAMING_SNAKE_CASE_ , use_past=SCREAMING_SNAKE_CASE_ )
if not getattr(self._config , '''pad_token_id''' , SCREAMING_SNAKE_CASE_ ):
# TODO: how to do that better?
UpperCamelCase :int = 0
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase :Union[str, Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' , inverted_values_shape=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCamelCase :Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def UpperCAmelCase ( self ) -> int:
return self._config.n_layer
@property
def UpperCAmelCase ( self ) -> int:
return self._config.n_head
@property
def UpperCAmelCase ( self ) -> float:
return 1e-3
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ) -> Mapping[str, Any]:
UpperCamelCase :Any = super(SCREAMING_SNAKE_CASE_ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
# We need to order the input in the way they appears in the forward()
UpperCamelCase :Tuple = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCamelCase , UpperCamelCase :str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCamelCase :List[str] = seqlen + 2
UpperCamelCase :int = self._config.hidden_size // self.num_attention_heads
UpperCamelCase :str = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCamelCase :str = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCamelCase :str = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers )
]
UpperCamelCase :Optional[int] = common_inputs['''attention_mask''']
if self.use_past:
UpperCamelCase :Union[str, Any] = ordered_inputs['''attention_mask'''].dtype
UpperCamelCase :Tuple = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase ( self ) -> int:
return 13
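    # Shape sketch for the dummy past_key_values assembled above:
    # past_key_values_length = seqlen + 2, each per-layer key has shape
    # (batch * num_attention_heads, head_dim, past_len) and each value
    # (batch * num_attention_heads, past_len, head_dim) -- BLOOM's layout.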
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if n == 0:
return 0
UpperCamelCase :Union[str, Any] = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :str = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ ) )
return max_revue
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase :Dict = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :Union[str, Any] = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
UpperCamelCase :str = max_revenue
return max_rev[n]
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
UpperCamelCase :List[str] = [float('''-inf''' ) for _ in range(n + 1 )]
UpperCamelCase :Dict = 0
for i in range(1 , n + 1 ):
UpperCamelCase :Optional[Any] = max_rev[i]
for j in range(1 , i + 1 ):
UpperCamelCase :Optional[Any] = max(SCREAMING_SNAKE_CASE__ , prices[j - 1] + max_rev[i - j] )
UpperCamelCase :Tuple = max_revenue_i
return max_rev[n]
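# Classic worked example for the bottom-up table above: with
# prices = [1, 5, 8, 9], a rod of length 4 yields 10 (two pieces of length 2,
# 5 + 5), which beats selling it whole for 9.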
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
if n < 0:
UpperCamelCase :Any = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if n > len(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Union[str, Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE__ )}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
def _A ( ):
UpperCamelCase :Dict = [6, 10, 12, 15, 20, 23]
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase :str = 36
UpperCamelCase :int = top_down_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = bottom_up_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 658 | 1 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict ):
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple="attention" ):
UpperCamelCase :Union[str, Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
UpperCamelCase :List[str] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCamelCase :int = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
UpperCamelCase :List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCamelCase :int = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
UpperCamelCase :List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCamelCase :Tuple = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
UpperCamelCase :int = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=False ):
if split_mlp_wi:
UpperCamelCase :Optional[int] = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
UpperCamelCase :str = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
UpperCamelCase :Tuple = (wi_a, wi_a)
else:
UpperCamelCase :Tuple = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
UpperCamelCase :int = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def _A ( SCREAMING_SNAKE_CASE__ : dict , *, SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : bool = False ):
UpperCamelCase :str = traverse_util.flatten_dict(variables['''target'''] )
UpperCamelCase :Dict = {'''/'''.join(SCREAMING_SNAKE_CASE__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCamelCase :List[Any] = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase :Dict = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :List[str] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''attention''' )
UpperCamelCase :Dict = layer_norm
UpperCamelCase :Union[str, Any] = k.T
UpperCamelCase :Optional[Any] = o.T
UpperCamelCase :str = q.T
UpperCamelCase :List[Any] = v.T
# Block i, layer 1 (MLP).
UpperCamelCase :Dict = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Optional[Any] = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = layer_norm
if split_mlp_wi:
UpperCamelCase :Any = wi[0].T
UpperCamelCase :List[Any] = wi[1].T
else:
UpperCamelCase :Any = wi.T
UpperCamelCase :Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :Dict = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' ).T
UpperCamelCase :int = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
UpperCamelCase :Union[str, Any] = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''encoder''' ).T
UpperCamelCase :int = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :Optional[Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[Any] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''self_attention''' )
UpperCamelCase :str = layer_norm
UpperCamelCase :Any = k.T
UpperCamelCase :Dict = o.T
UpperCamelCase :int = q.T
UpperCamelCase :Dict = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase :Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''encoder_decoder_attention''' )
UpperCamelCase :Dict = layer_norm
UpperCamelCase :List[str] = k.T
UpperCamelCase :Optional[Any] = o.T
UpperCamelCase :str = q.T
UpperCamelCase :int = v.T
# Block i, layer 2 (MLP).
UpperCamelCase :Dict = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Dict = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = layer_norm
if split_mlp_wi:
UpperCamelCase :Any = wi[0].T
UpperCamelCase :int = wi[1].T
else:
UpperCamelCase :Union[str, Any] = wi.T
UpperCamelCase :Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :List[Any] = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' ).T
UpperCamelCase :Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase :Optional[int] = old['''decoder/logits_dense/kernel'''].T
return new
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool ):
UpperCamelCase :List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCamelCase :Tuple = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCamelCase :Optional[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCamelCase :Optional[int] = state_dict['''shared.weight''']
return state_dict
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
UpperCamelCase :Optional[Any] = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = convert_tax_to_pytorch(
SCREAMING_SNAKE_CASE__ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE__ , scalable_attention=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = make_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ):
UpperCamelCase :int = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCamelCase :Dict = UMTaEncoderModel(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :str = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE__ )
print('''Done''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
__snake_case = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
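# Example invocation (script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --scalable_attention  # only for umt5-style checkpoints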
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class UpperCAmelCase_ ( lowercase, lowercase ):
"""simple docstring"""
UpperCamelCase_ : int ='focalnet'
def __init__( self , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=96 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=[192, 384, 768, 768] , SCREAMING_SNAKE_CASE_=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[3, 3, 3, 3] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=4.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1e-4 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = image_size
UpperCamelCase :Dict = patch_size
UpperCamelCase :Tuple = num_channels
UpperCamelCase :int = embed_dim
UpperCamelCase :Optional[Any] = use_conv_embed
UpperCamelCase :str = hidden_sizes
UpperCamelCase :str = depths
UpperCamelCase :Optional[int] = focal_levels
UpperCamelCase :Tuple = focal_windows
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :Optional[int] = mlp_ratio
UpperCamelCase :Optional[Any] = hidden_dropout_prob
UpperCamelCase :int = drop_path_rate
UpperCamelCase :Dict = use_layerscale
UpperCamelCase :List[str] = layerscale_value
UpperCamelCase :Tuple = use_post_layernorm
UpperCamelCase :int = use_post_layernorm_in_modulation
UpperCamelCase :str = normalize_modulator
UpperCamelCase :Any = initializer_range
UpperCamelCase :Optional[Any] = layer_norm_eps
UpperCamelCase :Dict = encoder_stride
UpperCamelCase :int = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
UpperCamelCase , UpperCamelCase :int = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
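        # Usage sketch: with the default depths [2, 2, 6, 2] the lines above
        # yield stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4'];
        # when out_features and out_indices are both None, the helper defaults
        # them to the last stage.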
| 658 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE__ )
assert noofclusters < len(SCREAMING_SNAKE_CASE__ )
# Find out the dimensionality
UpperCamelCase :List[Any] = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase :Tuple = list(range(len(SCREAMING_SNAKE_CASE__ ) ) )
shuffle(SCREAMING_SNAKE_CASE__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase :Any = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase :int = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase :Any = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(SCREAMING_SNAKE_CASE__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase :Any = tf.placeholder('''float64''' , [dim] )
UpperCamelCase :Dict = []
for centroid in centroids:
cent_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase :int = [tf.Variable(0 ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase :Union[str, Any] = tf.placeholder('''int32''' )
UpperCamelCase :Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase :List[str] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase :Optional[Any] = tf.reduce_mean(SCREAMING_SNAKE_CASE__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase :Optional[Any] = tf.placeholder('''float''' , [dim] )
UpperCamelCase :Any = tf.placeholder('''float''' , [dim] )
UpperCamelCase :List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase :Dict = tf.placeholder('''float''' , [noofclusters] )
UpperCamelCase :List[Any] = tf.argmin(SCREAMING_SNAKE_CASE__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCamelCase :str = tf.initialize_all_variables()
# Initialize all variables
sess.run(SCREAMING_SNAKE_CASE__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase :List[str] = 100
for _ in range(SCREAMING_SNAKE_CASE__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase :str = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase :Any = [
sess.run(SCREAMING_SNAKE_CASE__ , feed_dict={va: vect, va: sess.run(SCREAMING_SNAKE_CASE__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase :Optional[Any] = sess.run(
SCREAMING_SNAKE_CASE__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(SCREAMING_SNAKE_CASE__ ):
# Collect all the vectors assigned to this cluster
UpperCamelCase :List[str] = [
vectors[i]
for i in range(len(SCREAMING_SNAKE_CASE__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase :List[Any] = sess.run(
SCREAMING_SNAKE_CASE__ , feed_dict={mean_input: array(SCREAMING_SNAKE_CASE__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase :Optional[int] = sess.run(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = sess.run(SCREAMING_SNAKE_CASE__ )
return centroids, assignments
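# Note: this implementation targets the legacy TF1 graph API (tf.Session,
# tf.placeholder, tf.initialize_all_variables); on TF2 it would need
# tf.compat.v1 with eager execution disabled, and tf.sub is the pre-1.0
# spelling of tf.subtract.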
| 658 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :Union[str, Any] = parent
UpperCamelCase :Tuple = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Any = patch_size
UpperCamelCase :List[str] = num_channels
UpperCamelCase :int = is_training
UpperCamelCase :str = use_labels
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :List[Any] = backbone_out_indices
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Tuple = intermediate_size
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :List[str] = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :int = backbone_featmap_shape
UpperCamelCase :Any = scope
UpperCamelCase :int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Dict = (image_size // patch_size) ** 2
UpperCamelCase :List[str] = num_patches + 1
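        # e.g. with the defaults image_size=32 and patch_size=16:
        # (32 // 16) ** 2 = 4 patches, so seq_length = 4 + 1 = 5 with the [CLS] token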
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :List[str] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :Optional[Any] = self.num_labels
UpperCamelCase :Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :Optional[int] = self.num_labels
UpperCamelCase :int = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Tuple =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Tuple =False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Union[str, Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
UpperCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Any = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = False
UpperCamelCase :List[Any] = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Any:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :List[str] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = prepare_img()
UpperCamelCase :List[str] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :int = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] =UnCLIPImageVariationPipeline
UpperCamelCase_ : Any =IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
UpperCamelCase_ : int =IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ : Optional[int] =[
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
UpperCamelCase_ : Any =False
@property
def UpperCAmelCase ( self ) -> Dict:
return 32
@property
def UpperCAmelCase ( self ) -> List[str]:
return 32
@property
def UpperCAmelCase ( self ) -> Any:
return self.time_input_dim
@property
def UpperCAmelCase ( self ) -> Tuple:
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self ) -> Any:
return 100
@property
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase :Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase :Tuple = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
UpperCamelCase :Union[str, Any] = UnCLIPTextProjModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase :Optional[int] = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
UpperCamelCase :int = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
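        # Note: out_channels (6) is twice in_channels (3) because the decoder UNet
        # predicts a per-pixel mean and a learned-range variance; downstream the
        # scheduler splits the output in half along the channel dimension
        # (conceptually `mean, variance = output.chunk(2, dim=1)`).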
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def UpperCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
UpperCamelCase :Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
UpperCamelCase :Tuple = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.dummy_decoder
UpperCamelCase :int = self.dummy_text_proj
UpperCamelCase :List[str] = self.dummy_text_encoder
UpperCamelCase :Tuple = self.dummy_tokenizer
UpperCamelCase :List[Any] = self.dummy_super_res_first
UpperCamelCase :List[Any] = self.dummy_super_res_last
UpperCamelCase :Any = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
UpperCamelCase :Optional[Any] = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
UpperCamelCase :Dict = CLIPImageProcessor(crop_size=32 , size=32 )
UpperCamelCase :Dict = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=True ) -> Tuple:
UpperCamelCase :Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
UpperCamelCase :Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase :Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
if pil_image:
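            # map the tensor from [-1, 1] back to [0, 1], then to NHWC numpy for PIL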
UpperCamelCase :Optional[Any] = input_image * 0.5 + 0.5
UpperCamelCase :Tuple = input_image.clamp(0 , 1 )
UpperCamelCase :str = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase :str = DiffusionPipeline.numpy_to_pil(SCREAMING_SNAKE_CASE_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[Any] = '''cpu'''
UpperCamelCase :Union[str, Any] = self.get_dummy_components()
UpperCamelCase :str = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = output.images
UpperCamelCase :Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
UpperCamelCase :Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase :List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase :Optional[int] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Union[str, Any] = '''cpu'''
UpperCamelCase :List[str] = self.get_dummy_components()
UpperCamelCase :List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = output.images
UpperCamelCase :List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
UpperCamelCase :int = image[0, -3:, -3:, -1]
UpperCamelCase :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase :str = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = '''cpu'''
UpperCamelCase :Dict = self.get_dummy_components()
UpperCamelCase :Dict = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
UpperCamelCase :int = pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = output.images
UpperCamelCase :Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
UpperCamelCase :Any = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
UpperCamelCase :Optional[int] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :int = torch.device('''cpu''' )
        class DummyScheduler:
            """minimal stand-in: prepare_latents only reads init_noise_sigma"""

            init_noise_sigma = 1
UpperCamelCase :Any = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCamelCase :List[str] = pipe.decoder.dtype
UpperCamelCase :Optional[int] = 1
UpperCamelCase :List[str] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCamelCase :Tuple = pipe.prepare_latents(
SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler() )
UpperCamelCase :Optional[Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCamelCase :Optional[Any] = pipe.prepare_latents(
SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler() )
UpperCamelCase :Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
        img_out_a = pipe(
            **SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_ ).images
UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
# Don't pass image, instead pass embedding
UpperCamelCase :List[str] = pipeline_inputs.pop('''image''' )
UpperCamelCase :Optional[int] = pipe.image_encoder(SCREAMING_SNAKE_CASE_ ).image_embeds
        img_out_b = pipe(
            **SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_ , image_embeddings=SCREAMING_SNAKE_CASE_ , ).images
        # make sure passing the image embeddings manually is identical to letting the pipeline compute them
        assert np.abs(img_out_a - img_out_b ).max() < 1e-4
@skip_mps
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Optional[int] = torch_device == '''cpu'''
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
UpperCamelCase :Any = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=SCREAMING_SNAKE_CASE_ )
@skip_mps
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[Any] = torch_device == '''cpu'''
UpperCamelCase :Union[str, Any] = True
UpperCamelCase :Any = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Dict = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
UpperCamelCase :Optional[Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ )
@skip_mps
def UpperCAmelCase ( self ) -> Dict:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase ( self ) -> Dict:
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase ( self ) -> Optional[int]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
UpperCamelCase :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
UpperCamelCase :Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
UpperCamelCase :int = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase :Tuple = pipeline(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
UpperCamelCase :Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 15 )
| 658 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Union[str, Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
UpperCamelCase :Any = 128
elif "12-12" in model_name:
UpperCamelCase :Union[str, Any] = 12
UpperCamelCase :Any = 12
elif "14-14" in model_name:
UpperCamelCase :Optional[int] = 14
UpperCamelCase :List[str] = 14
elif "16-16" in model_name:
UpperCamelCase :List[Any] = 16
UpperCamelCase :Optional[Any] = 16
else:
raise ValueError('''Model not supported''' )
UpperCamelCase :Tuple = '''huggingface/label-files'''
if "speech-commands" in model_name:
UpperCamelCase :Optional[Any] = 35
UpperCamelCase :List[Any] = '''speech-commands-v2-id2label.json'''
else:
UpperCamelCase :Optional[int] = 527
UpperCamelCase :List[Any] = '''audioset-id2label.json'''
UpperCamelCase :Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase :List[str] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
UpperCamelCase :List[Any] = idalabel
UpperCamelCase :List[Any] = {v: k for k, v in idalabel.items()}
return config
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if "module.v" in name:
UpperCamelCase :Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
UpperCamelCase :int = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
UpperCamelCase :Tuple = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
UpperCamelCase :Optional[int] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase :str = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
UpperCamelCase :Any = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCamelCase :str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCamelCase :Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase :Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase :List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
UpperCamelCase :Union[str, Any] = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
UpperCamelCase :int = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
UpperCamelCase :Tuple = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
for key in orig_state_dict.copy().keys():
UpperCamelCase :Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
UpperCamelCase :Any = key.split('''.''' )
UpperCamelCase :str = int(key_split[3] )
UpperCamelCase :Union[str, Any] = config.hidden_size
if "weight" in key:
UpperCamelCase :List[str] = val[:dim, :]
UpperCamelCase :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase :Optional[Any] = val[-dim:, :]
else:
UpperCamelCase :Dict = val[:dim]
UpperCamelCase :Optional[int] = val[dim : dim * 2]
UpperCamelCase :List[Any] = val[-dim:]
else:
UpperCamelCase :Union[str, Any] = val
return orig_state_dict
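# The slicing above splits a timm/DeiT-style fused qkv projection into separate
# query/key/value tensors. A minimal self-contained sketch (illustrative sizes;
# `torch` is already imported at the top of this file):
def _split_fused_qkv_sketch():
    dim = 4
    fused_weight = torch.randn(3 * dim, dim)  # rows stacked as [q | k | v]
    q_w = fused_weight[:dim, :]
    k_w = fused_weight[dim : dim * 2, :]
    v_w = fused_weight[-dim:, :]
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)
    return q_w, k_w, v_w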
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[str] = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=False ):
UpperCamelCase :Optional[Any] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
UpperCamelCase :Optional[int] = model_name_to_url[model_name]
UpperCamelCase :Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE__ )
# rename some keys
UpperCamelCase :Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load 🤗 model
UpperCamelCase :int = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
UpperCamelCase :Union[str, Any] = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
UpperCamelCase :List[str] = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
UpperCamelCase :Optional[Any] = 1024 if '''speech-commands''' not in model_name else 128
UpperCamelCase :int = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
if "speech-commands" in model_name:
UpperCamelCase :Dict = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
UpperCamelCase :List[Any] = dataset[0]['''audio''']['''array''']
else:
UpperCamelCase :List[Any] = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
UpperCamelCase , UpperCamelCase :Dict = torchaudio.load(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = waveform.squeeze().numpy()
UpperCamelCase :Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=16000 , return_tensors='''pt''' )
# forward pass
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCamelCase :Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCamelCase :Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCamelCase :str = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCamelCase :List[str] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCamelCase :Dict = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCamelCase :List[str] = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCamelCase :Optional[int] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCamelCase :List[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : list ):
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return [tuple(SCREAMING_SNAKE_CASE__ )]
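    # Heap's algorithm (iterative form): every new permutation is produced from
    # the previous one by a single swap; c[i] plays the role of the loop counter
    # of stack frame i in the recursive formulation.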
UpperCamelCase :int = []
def generate(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
UpperCamelCase :Optional[Any] = [0] * n
res.append(tuple(SCREAMING_SNAKE_CASE__ ) )
UpperCamelCase :Optional[int] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
UpperCamelCase , UpperCamelCase :Optional[int] = arr[i], arr[0]
else:
UpperCamelCase , UpperCamelCase :int = arr[i], arr[c[i]]
res.append(tuple(SCREAMING_SNAKE_CASE__ ) )
c[i] += 1
UpperCamelCase :List[str] = 0
else:
UpperCamelCase :Tuple = 0
i += 1
generate(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
return res
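# A quick sanity check (illustrative): heaps([1, 2, 3]) yields all 3! = 6
# orderings, starting from the identity:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]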
if __name__ == "__main__":
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 658 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 | 1 |
import unittest
from transformers import DonutProcessor
__snake_case = """naver-clova-ix/donut-base"""
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = DonutProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
UpperCamelCase :Tuple = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
UpperCamelCase :Any = self.processor.tokenajson(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 658 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTeX.
    references: list of references, one per prediction. Each
        reference is a string that contains natural language
        and LaTeX.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :Tuple = 0.0
for i, j in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
n_correct += 1.0 if math_equivalence.is_equiv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else 0.0
UpperCamelCase :int = n_correct / len(SCREAMING_SNAKE_CASE_ )
return {
"accuracy": accuracy,
}
| 658 | 1 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
if b == 0:
return (1, 0)
((UpperCamelCase) , (UpperCamelCase)) :Optional[Any] = extended_euclid(SCREAMING_SNAKE_CASE__ , a % b )
UpperCamelCase :Any = a // b
return (y, x - k * y)
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
((UpperCamelCase) , (UpperCamelCase)) :Union[str, Any] = extended_euclid(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Any = na * na
UpperCamelCase :int = ra * x * na + ra * y * na
return (n % m + m) % m
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
((UpperCamelCase) , (UpperCamelCase)) :List[str] = extended_euclid(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if b < 0:
UpperCamelCase :Optional[int] = (b % n + n) % n
return b
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase , UpperCamelCase :Union[str, Any] = invert_modulo(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), invert_modulo(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = na * na
UpperCamelCase :Any = ra * x * na + ra * y * na
return (n % m + m) % m
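# Worked example (values chosen for illustration): to find n with
# n = 1 (mod 5) and n = 2 (mod 7), chinese_remainder_theorem(5, 1, 7, 2)
# combines the Bezout coefficients of 5 and 7 (5 * 3 + 7 * (-2) = 1) and
# returns 16, since 16 % 5 == 1 and 16 % 7 == 2.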
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 658 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def _A ( ):
UpperCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase :Dict = parser.parse_args()
return args.f
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]="eval" ):
UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
raise ValueError(F'''can\'t find {path}''' )
__snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_glue.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_clm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Dict = self.get_auto_remove_tmp_dir()
UpperCamelCase :Any = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_summarization_flax.main()
UpperCamelCase :str = get_results(SCREAMING_SNAKE_CASE_ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :List[str] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_mlm_flax.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :int = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_ta_mlm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCamelCase :Tuple = 7 if get_gpu_count() > 1 else 2
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[int] = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_ner.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Dict = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_qa.main()
UpperCamelCase :int = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
UpperCamelCase :Any = 0
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCamelCase :Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
UpperCamelCase :List[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCamelCase :List[Any] = left
UpperCamelCase :Optional[Any] = point
elif point > right:
UpperCamelCase :Optional[int] = right
UpperCamelCase :Any = point
else:
if item < current_item:
UpperCamelCase :List[str] = point - 1
else:
UpperCamelCase :Dict = point + 1
return None
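# Worked probe on the demo collection below: for [10, 30, 40, 45, 50, 66, 77, 93]
# and item 67, the first probe is
# point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4,
# i.e. index 4 (value 50) rather than the midpoint a binary search would pick.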
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCamelCase :Dict = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point + 1 , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : str ):
if collection != sorted(SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
__snake_case = 0
if debug == 1:
__snake_case = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
__snake_case = 67
__snake_case = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("""Not found""")
| 658 |
from __future__ import annotations
from collections.abc import Callable
def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ):
UpperCamelCase :Optional[Any] = x_start
UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = 0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
UpperCamelCase :Any = (x_end - x_start) / steps + xa
UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCamelCase :Optional[int] = xa
UpperCamelCase :List[str] = fxa
return area
if __name__ == "__main__":
def _A ( SCREAMING_SNAKE_CASE__ : int ):
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
__snake_case = 10
while i <= 10_00_00:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
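        # Sanity check on the rule itself: the exact integral of x^3 + x^2 over
        # [-5, 5] is 0 + 250/3 = 83.33..., which the printed approximations
        # converge toward as the step count is multiplied by 10.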
| 658 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0 ):
# Format the message.
if name is None:
UpperCamelCase :List[str] = None
else:
UpperCamelCase :Union[str, Any] = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}'''
UpperCamelCase :Dict = fmt.format(SCREAMING_SNAKE_CASE__ )
# Print and recurse (if needed).
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if msg is not None:
print(SCREAMING_SNAKE_CASE__ )
for k in val.keys():
recursive_print(SCREAMING_SNAKE_CASE__ , val[k] , spaces + 2 )
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
print(SCREAMING_SNAKE_CASE__ , ''':''' , val.size() )
else:
print(SCREAMING_SNAKE_CASE__ , ''':''' , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
UpperCamelCase :List[str] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
UpperCamelCase :List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
UpperCamelCase :Union[str, Any] = param.view(*SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = param.transpose(0 , 2 )
UpperCamelCase :str = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
UpperCamelCase :str = (num_heads, num_splits, hidden_size) + input_shape[1:]
UpperCamelCase :Any = param.view(*SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = param.transpose(0 , 1 ).contiguous()
UpperCamelCase :Optional[int] = param.view(*SCREAMING_SNAKE_CASE__ )
return param
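# Tiny shape sketch (illustrative only): for checkpoint_version >= 2.0 the
# input rows are grouped as [num_heads, num_splits, hidden_size] and the
# transpose above swaps the first two axes so the Q/K/V splits become the
# leading blocks. With num_splits=3, num_heads=2, hidden_size=4:
#   param.view(2, 3, 4, -1).transpose(0, 1).contiguous().view(24, -1)
# keeps the overall shape [24, cols] but reorders the rows.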
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
# The converted output model.
UpperCamelCase :Union[str, Any] = {}
# old versions did not store training args
UpperCamelCase :int = input_state_dict.get('''args''' , SCREAMING_SNAKE_CASE__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
UpperCamelCase :Optional[Any] = ds_args.padded_vocab_size
UpperCamelCase :List[str] = ds_args.max_position_embeddings
UpperCamelCase :Union[str, Any] = ds_args.hidden_size
UpperCamelCase :List[Any] = ds_args.num_layers
UpperCamelCase :Union[str, Any] = ds_args.num_attention_heads
UpperCamelCase :List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
UpperCamelCase :List[str] = config.n_head
# The hidden_size per head.
UpperCamelCase :Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
UpperCamelCase :Optional[Any] = input_state_dict['''checkpoint_version''']
else:
UpperCamelCase :int = 0.0
# The model.
UpperCamelCase :str = input_state_dict['''model''']
# The language model.
UpperCamelCase :Optional[int] = model['''language_model''']
# The embeddings.
UpperCamelCase :Tuple = lm['''embedding''']
# The word embeddings.
UpperCamelCase :Dict = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
UpperCamelCase :Optional[int] = word_embeddings[: config.vocab_size, :]
UpperCamelCase :List[Any] = word_embeddings
# The position embeddings.
UpperCamelCase :List[Any] = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
UpperCamelCase :List[str] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
UpperCamelCase :Optional[Any] = pos_embeddings
# The transformer.
UpperCamelCase :Dict = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
UpperCamelCase :Optional[int] = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
UpperCamelCase :Any = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
UpperCamelCase :Optional[Any] = layer_re.match(SCREAMING_SNAKE_CASE__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
UpperCamelCase :Union[str, Any] = int(m.group(1 ) )
# The name of the operation.
UpperCamelCase :int = m.group(2 )
# Is it a weight or a bias?
UpperCamelCase :List[Any] = m.group(3 )
# The name of the layer.
UpperCamelCase :Optional[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
UpperCamelCase :Union[str, Any] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
UpperCamelCase :Dict = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
UpperCamelCase :List[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
UpperCamelCase :str = torch.tensor(-1e4 , dtype=torch.floataa )
UpperCamelCase :Any = masked_bias
UpperCamelCase :Any = fix_query_key_value_ordering(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 3 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
UpperCamelCase :Tuple = out_val.transpose(0 , 1 ).contiguous()
# Store.
UpperCamelCase :Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
UpperCamelCase :List[str] = fix_query_key_value_ordering(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 3 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Store. No change of shape.
UpperCamelCase :Dict = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
UpperCamelCase :str = megatron_to_transformers[op_name]
UpperCamelCase :int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
UpperCamelCase :Dict = megatron_to_transformers[op_name]
UpperCamelCase :List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
UpperCamelCase :List[str] = transformer['''final_layernorm.weight''']
UpperCamelCase :str = transformer['''final_layernorm.bias''']
    # For the LM head, transformers wants the matrix tied to the word embeddings.
UpperCamelCase :int = word_embeddings
# It should be done!
return output_state_dict
def _A ( ):
# Create the argument parser.
UpperCamelCase :Any = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''' , type=SCREAMING_SNAKE_CASE__ , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=SCREAMING_SNAKE_CASE__ , help='''An optional config json file describing the pre-trained model.''' , )
UpperCamelCase :Optional[int] = parser.parse_args()
# Extract the basename.
UpperCamelCase :int = os.path.dirname(args.path_to_checkpoint )
# Load the model.
    # the .zip wrapper is optional; support for it is kept for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
UpperCamelCase :int = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
else:
UpperCamelCase :str = torch.load(args.path_to_checkpoint , map_location='''cpu''' )
UpperCamelCase :Any = input_state_dict.get('''args''' , SCREAMING_SNAKE_CASE__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
UpperCamelCase :Optional[Any] = '''gelu_fast'''
elif ds_args.openai_gelu:
UpperCamelCase :Any = '''gelu_new'''
else:
UpperCamelCase :List[str] = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
UpperCamelCase :int = '''gelu_new'''
# Spell out all parameters in case the defaults change.
UpperCamelCase :Any = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=SCREAMING_SNAKE_CASE__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=SCREAMING_SNAKE_CASE__ , summary_activation=SCREAMING_SNAKE_CASE__ , summary_proj_to_labels=SCREAMING_SNAKE_CASE__ , summary_first_dropout=0.1 , scale_attn_weights=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ , bos_token_id=50256 , eos_token_id=50256 , )
else:
UpperCamelCase :Tuple = GPTaConfig.from_json_file(args.config_file )
UpperCamelCase :int = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
UpperCamelCase :int = convert_megatron_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
UpperCamelCase :Dict = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
UpperCamelCase :Union[str, Any] = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
UpperCamelCase :int = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
UpperCamelCase :Any = '''gpt2'''
UpperCamelCase :Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = type(SCREAMING_SNAKE_CASE__ ).__name__
UpperCamelCase :List[Any] = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Store the state_dict to file.
UpperCamelCase :Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , '''pytorch_model.bin''' )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
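# Example invocation (a sketch; the checkpoint path is illustrative):
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip
#
# The converted config, tokenizer files and pytorch_model.bin are written to
# the directory containing the input checkpoint.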
| 658 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(CMStochasticIterativeScheduler,)
UpperCamelCase_ : Any =10
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = 10
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Dict = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps[0]
UpperCamelCase :Union[str, Any] = scheduler.timesteps[1]
UpperCamelCase :str = self.dummy_sample
UpperCamelCase :List[str] = 0.1 * sample
UpperCamelCase :List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :str = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ) -> List[str]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[Any] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps
UpperCamelCase :Union[str, Any] = torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = self.dummy_model()
UpperCamelCase :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase :List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = pred_prev_sample
UpperCamelCase :Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Dict = self.scheduler_classes[0]
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = scheduler.timesteps
UpperCamelCase :int = torch.manual_seed(0 )
UpperCamelCase :str = self.dummy_model()
UpperCamelCase :List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase :List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :int = pred_prev_sample
UpperCamelCase :Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :Tuple = self.get_scheduler_config()
UpperCamelCase :List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = [39, 30, 12, 1, 0]
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[int] = self.scheduler_classes[0]
UpperCamelCase :List[str] = self.get_scheduler_config()
UpperCamelCase :Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
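# For reference, the denoising loop exercised by the tests above follows the
# standard three-step sampling pattern (a sketch; the names mirror the calls
# made in the tests):
#
#   for t in scheduler.timesteps:
#       scaled = scheduler.scale_model_input(sample, t)
#       model_output = model(scaled, t)
#       sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample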
| 658 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : int ='ClapFeatureExtractor'
UpperCamelCase_ : Dict =('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase :Tuple = kwargs.pop('''sampling_rate''' , SCREAMING_SNAKE_CASE_ )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
UpperCamelCase :Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if audios is not None:
UpperCamelCase :List[Any] = self.feature_extractor(
SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is not None and audios is not None:
UpperCamelCase :Union[str, Any] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> str:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = self.tokenizer.model_input_names
UpperCamelCase :Optional[int] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
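# Typical usage of the processor above (a sketch; the checkpoint name and
# waveform are illustrative):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(
#       text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt"
#   )
#
# When both `text` and `audios` are given, a single encoding is returned that
# carries the tokenizer outputs plus the extracted `input_features`.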
| 658 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : float | Decimal , SCREAMING_SNAKE_CASE__ : float = 10**-10 ):
UpperCamelCase :Dict = a
while True:
UpperCamelCase :Tuple = Decimal(SCREAMING_SNAKE_CASE__ ) - (
Decimal(eval(SCREAMING_SNAKE_CASE__ ) ) / Decimal(eval(str(diff(SCREAMING_SNAKE_CASE__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(SCREAMING_SNAKE_CASE__ ) ) < precision: # noqa: S307
return float(SCREAMING_SNAKE_CASE__ )
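# For reference, the update above is the classic Newton-Raphson iteration
# x_{n+1} = x_n - f(x_n) / f'(x_n). A minimal sympy-free sketch with an
# explicit derivative:
def _newton_raphson_explicit(f, df, x0: float, precision: float = 10**-10) -> float:
    x = x0
    while abs(f(x)) >= precision:
        x -= f(x) / df(x)
    return x
# e.g. _newton_raphson_explicit(lambda x: x**2 - 2, lambda x: 2 * x, 1.0)
# converges to sqrt(2) ~= 1.4142135623730951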
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e (the root of log(x) - 1 = 0)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 658 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return None
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return None
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any =[
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> int:
from transformers import BertModel
UpperCamelCase :int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
vocab_file.flush()
UpperCamelCase :Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase :Union[str, Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ )
@require_tf
@slow
def UpperCAmelCase ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :Tuple = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = quantize(Path(SCREAMING_SNAKE_CASE_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :str = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = quantize(SCREAMING_SNAKE_CASE_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase :Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[str]:
from transformers import BertModel
UpperCamelCase :List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[Any]:
from transformers import TFBertModel
UpperCamelCase :Optional[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Tuple = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
        # Parameters should be reordered according to their respective places in the function signature:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Should have exactly one arg (everything before the first argument that was not provided, "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :str = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 658 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
# prepare kernel
    # the kernel size has to be odd
if (ksize % 2) == 0:
UpperCamelCase :Dict = ksize + 1
UpperCamelCase :Optional[Any] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(SCREAMING_SNAKE_CASE__ ):
for x in range(SCREAMING_SNAKE_CASE__ ):
# distance from center
UpperCamelCase :Tuple = x - ksize // 2
UpperCamelCase :int = y - ksize // 2
# degree to radiant
UpperCamelCase :Tuple = theta / 180 * np.pi
UpperCamelCase :Union[str, Any] = np.cos(_theta )
UpperCamelCase :Any = np.sin(_theta )
# get kernel x
UpperCamelCase :List[Any] = cos_theta * px + sin_theta * py
# get kernel y
UpperCamelCase :List[Any] = -sin_theta * px + cos_theta * py
# fill kernel
UpperCamelCase :int = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
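# Quick shape check (a sketch): an even ksize is bumped to the next odd value
# above, so requesting 10 yields an 11x11 kernel.
# >>> gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape
# (11, 11)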
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__snake_case = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
__snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__snake_case = out / out.max() * 2_55
__snake_case = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 658 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Union[str, Any] = tempfile.mkdtemp()
UpperCamelCase :List[str] = 5
# Realm tok
UpperCamelCase :List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase :Dict = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase :Any = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Tuple = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=SCREAMING_SNAKE_CASE_ , )
return block_records
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[Any] = self.get_config()
UpperCamelCase :str = self.get_dummy_retriever()
UpperCamelCase :int = retriever.tokenizer
UpperCamelCase :Optional[Any] = np.array([0, 3] , dtype='''long''' )
UpperCamelCase :Optional[Any] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Tuple = tokenizer(
['''the fourth'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Optional[Any] = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = self.get_config()
UpperCamelCase :Union[str, Any] = self.get_dummy_retriever()
UpperCamelCase :Dict = retriever.tokenizer
UpperCamelCase :str = np.array([0, 3, 5] , dtype='''long''' )
UpperCamelCase :List[str] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Optional[Any] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Any = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :str = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
UpperCamelCase :List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
UpperCamelCase :Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCamelCase :List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 658 | 1 |
import heapq
def _A ( SCREAMING_SNAKE_CASE__ : dict ):
UpperCamelCase :list[list] = []
    # push each node and its adjacency list onto the queue together with the
    # node's rank; heapq implements a min-priority queue, so -1 * len(v) is
    # used to simulate a max-priority queue keyed on node degree
for key, value in graph.items():
# O(log(n))
heapq.heappush(SCREAMING_SNAKE_CASE__ , [-1 * len(SCREAMING_SNAKE_CASE__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCamelCase :int = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCamelCase :str = heapq.heappop(SCREAMING_SNAKE_CASE__ )[1][0]
chosen_vertices.add(SCREAMING_SNAKE_CASE__ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCamelCase :List[str] = elem[1][1].index(SCREAMING_SNAKE_CASE__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(SCREAMING_SNAKE_CASE__ )
return chosen_vertices
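# Note: this max-degree greedy strategy is a heuristic; it always returns a
# valid vertex cover, but not necessarily a minimum one.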
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 658 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]:
UpperCamelCase :int = parent
UpperCamelCase :List[Any] = batch_size
UpperCamelCase :List[Any] = patch_size
UpperCamelCase :Optional[int] = max_length
UpperCamelCase :Union[str, Any] = num_mel_bins
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :Dict = use_labels
UpperCamelCase :Dict = hidden_size
UpperCamelCase :Optional[int] = num_hidden_layers
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Optional[int] = intermediate_size
UpperCamelCase :List[str] = hidden_act
UpperCamelCase :List[str] = hidden_dropout_prob
UpperCamelCase :List[Any] = attention_probs_dropout_prob
UpperCamelCase :str = type_sequence_label_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :Union[str, Any] = scope
UpperCamelCase :List[Any] = frequency_stride
UpperCamelCase :Tuple = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension
UpperCamelCase :Optional[int] = num_patches + 2
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :str = self.get_config()
return config, input_values, labels
def UpperCAmelCase ( self ) -> List[Any]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Any =(
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Dict =False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = ASTModelTester(self )
UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Any = [*signature.parameters.keys()]
UpperCamelCase :Optional[int] = ['''input_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Any = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ )
return audio, sampling_rate
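# Note: torchaudio.load returns a (waveform, sample_rate) tuple, where the
# waveform is a float tensor of shape [channels, frames]; the test below
# squeezes it to 1-D before feature extraction.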
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Tuple:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = self.default_feature_extractor
UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = self.default_feature_extractor
UpperCamelCase , UpperCamelCase :Dict = prepare_audio()
UpperCamelCase :Dict = audio.squeeze().numpy()
UpperCamelCase :int = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase :List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 | 1 |
import requests
from bsa import BeautifulSoup
def _A ( SCREAMING_SNAKE_CASE__ : str = "https://www.worldometers.info/coronavirus" ):
UpperCamelCase :Optional[int] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , '''html.parser''' )
UpperCamelCase :Union[str, Any] = soup.findAll('''h1''' )
UpperCamelCase :Any = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
UpperCamelCase :Dict = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase :Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
UpperCamelCase :int = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_55.0
UpperCamelCase :List[str] = image.transpose(0 , 3 , 1 , 2 )
UpperCamelCase :Tuple = 2.0 * image - 1.0
UpperCamelCase :Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
return image
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0.99_95 ):
    # Track whether the inputs are torch tensors up front, so plain numpy
    # inputs do not raise a NameError at the `inputs_are_torch` check near
    # the end of the function.
    UpperCamelCase :int = not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
    if inputs_are_torch:
UpperCamelCase :Dict = va.device
UpperCamelCase :List[Any] = va.cpu().numpy()
UpperCamelCase :str = va.cpu().numpy()
UpperCamelCase :Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) )
if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD:
UpperCamelCase :Any = (1 - t) * va + t * va
else:
UpperCamelCase :Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = theta_a * t
UpperCamelCase :str = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCamelCase :List[Any] = sin_theta_t / sin_theta_a
UpperCamelCase :Union[str, Any] = sa * va + sa * va
if inputs_are_torch:
UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return va
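# Minimal usage sketch (`slerp` is the conventional name for the spherical
# interpolation above): halfway between two orthogonal unit vectors lies the
# 45-degree direction.
# >>> slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# array([0.70710678, 0.70710678])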
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
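# Derivation note: after normalization x and y are unit vectors, so
# ||x - y|| = 2 * sin(theta / 2) with theta the angle between them; the
# expression above therefore equals 2 * (theta / 2)**2, i.e. half the
# squared geodesic (great-circle) distance.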
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ):
for param in model.parameters():
UpperCamelCase :Any = value
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> str:
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , clip_model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , coca_model=SCREAMING_SNAKE_CASE_ , coca_tokenizer=SCREAMING_SNAKE_CASE_ , coca_transform=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Union[str, Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE_ )
else feature_extractor.size['''shortest_edge''']
)
UpperCamelCase :Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE_ )
set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase :Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
# get the original timestep using init_timestep
UpperCamelCase :Union[str, Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase :Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
UpperCamelCase :Tuple = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ )
]
UpperCamelCase :List[str] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
else:
UpperCamelCase :Any = self.vae.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[str] = 0.1_8215 * init_latents
UpperCamelCase :Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase :List[Any] = randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
UpperCamelCase :Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = init_latents
return latents
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase :Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase :List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase :str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase :int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
UpperCamelCase :List[str] = latents.detach().requires_grad_()
UpperCamelCase :List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCamelCase :int = torch.sqrt(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :str = self.scheduler.sigmas[index]
UpperCamelCase :Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :int = 1 / 0.1_8215 * sample
UpperCamelCase :List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :List[str] = transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = self.normalize(SCREAMING_SNAKE_CASE_ ).to(latents.dtype )
UpperCamelCase :List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() * clip_guidance_scale
UpperCamelCase :Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = latents.detach() + grads * (sigma**2)
UpperCamelCase :Optional[Any] = noise_pred_original
else:
UpperCamelCase :List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_ ) * grads
return noise_pred, latents
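# Full style-transfer call: caption missing prompts with CoCa, slerp content/style text embeddings and latents, then denoise with optional classifier-free and CLIP guidance.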
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.6 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0.8 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Generator ) and batch_size > 1:
UpperCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1)
UpperCamelCase :Tuple = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCamelCase :Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase :Dict = ''', '''.join(SCREAMING_SNAKE_CASE_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :Any = self.get_image_description(SCREAMING_SNAKE_CASE_ )
if style_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :str = self.get_image_description(SCREAMING_SNAKE_CASE_ )
# get prompt text embeddings for content and style
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :Dict = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase :Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# set timesteps
UpperCamelCase :str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase :List[str] = {}
if accepts_offset:
UpperCamelCase :Tuple = 1
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
# tensor.to() is not in-place, so the device-moved copy must be stored back
self.scheduler.timesteps = self.scheduler.timesteps.to(self.device )
UpperCamelCase , UpperCamelCase :Tuple = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
UpperCamelCase :Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
# Preprocess image
UpperCamelCase :Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
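# blend content and style latents by spherical interpolation (the first argument is presumably the slerp weight)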
UpperCamelCase :str = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if clip_guidance_scale > 0:
UpperCamelCase :Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = slerp(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase :Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase :Any = content_text_input.input_ids.shape[-1]
UpperCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCamelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase :Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase :str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase :int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase :int = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase :str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase :Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase :Dict = {}
if accepts_eta:
UpperCamelCase :int = eta
# check if the scheduler accepts generator
UpperCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase :List[str] = generator
with self.progress_bar(total=SCREAMING_SNAKE_CASE_ ):
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase :Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase :Any = noise_pred.chunk(2 )
UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase :int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase , UpperCamelCase :str = self.cond_fn(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase :List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[Any] = 1 / 0.1_8215 * latents
UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase :List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 658 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
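# Model tester that builds a small LayoutLMv3 config plus random text, bounding-box and image inputs for the TF tests below.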
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=36 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1000 , ) -> Union[str, Any]:
UpperCamelCase :List[Any] = parent
UpperCamelCase :Dict = batch_size
UpperCamelCase :Any = num_channels
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :int = patch_size
UpperCamelCase :List[Any] = is_training
UpperCamelCase :Optional[int] = use_input_mask
UpperCamelCase :Any = use_token_type_ids
UpperCamelCase :List[Any] = use_labels
UpperCamelCase :Tuple = vocab_size
UpperCamelCase :List[str] = hidden_size
UpperCamelCase :List[Any] = num_hidden_layers
UpperCamelCase :List[str] = num_attention_heads
UpperCamelCase :Dict = intermediate_size
UpperCamelCase :Any = hidden_act
UpperCamelCase :int = hidden_dropout_prob
UpperCamelCase :Dict = attention_probs_dropout_prob
UpperCamelCase :Optional[int] = max_position_embeddings
UpperCamelCase :str = type_vocab_size
UpperCamelCase :Dict = type_sequence_label_size
UpperCamelCase :Dict = initializer_range
UpperCamelCase :List[str] = coordinate_size
UpperCamelCase :Tuple = shape_size
UpperCamelCase :Dict = num_labels
UpperCamelCase :Optional[Any] = num_choices
UpperCamelCase :List[str] = scope
UpperCamelCase :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase :List[str] = text_seq_length
UpperCamelCase :Union[str, Any] = (image_size // patch_size) ** 2 + 1
UpperCamelCase :Tuple = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase :Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase :Optional[Any] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase :Optional[int] = bbox[i, j, 3]
UpperCamelCase :Optional[int] = bbox[i, j, 1]
UpperCamelCase :Optional[int] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase :Optional[int] = bbox[i, j, 2]
UpperCamelCase :Any = bbox[i, j, 0]
UpperCamelCase :List[Any] = tmp_coordinate
UpperCamelCase :int = tf.constant(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_input_mask:
UpperCamelCase :Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase :List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase :int = None
UpperCamelCase :str = None
if self.use_labels:
UpperCamelCase :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase :str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :str = TFLayoutLMvaModel(config=SCREAMING_SNAKE_CASE_ )
# text + image
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase :Any = model({'''pixel_values''': pixel_values} , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :Optional[int] = self.num_labels
UpperCamelCase :Any = TFLayoutLMvaForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :Union[str, Any] = self.num_labels
UpperCamelCase :Tuple = TFLayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase :Optional[int] = 2
UpperCamelCase :Any = TFLayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :str = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :Union[str, Any] = config_and_inputs
UpperCamelCase :Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any =(
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Union[str, Any] =(
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : List[str] =False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
return True
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> dict:
UpperCamelCase :Tuple = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :List[Any] = {
k: tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(SCREAMING_SNAKE_CASE_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :List[Any] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Optional[Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Dict = TFLayoutLMvaModelTester(self )
UpperCamelCase :int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ )
if getattr(SCREAMING_SNAKE_CASE_ , '''hf_compute_loss''' , SCREAMING_SNAKE_CASE_ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase :Dict = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=SCREAMING_SNAKE_CASE_ )[0]
]
UpperCamelCase :int = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase :List[Any] = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = prepared_for_class.pop('''input_ids''' )
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCamelCase :str = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase :Optional[int] = -100
UpperCamelCase :str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase :List[str] = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase :int = self._prepare_for_class(inputs_dict.copy() , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase :Tuple = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase :int = inspect.signature(model.call ).parameters
UpperCamelCase :int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase :Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCamelCase :Union[str, Any] = signature_names.index(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = label_key
UpperCamelCase :List[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase :Union[str, Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase :str = prepared_for_class[value]
UpperCamelCase :List[str] = tuple(SCREAMING_SNAKE_CASE_ )
# Send to model
UpperCamelCase :Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase ( self ) -> str:
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase :Optional[int] = type
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase ( self ) -> Dict:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :Union[str, Any] = TFLayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCamelCase :List[str] = self.default_image_processor
UpperCamelCase :List[str] = prepare_img()
UpperCamelCase :Any = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' ).pixel_values
UpperCamelCase :Union[str, Any] = tf.constant([[1, 2]] )
UpperCamelCase :Union[str, Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase :str = model(input_ids=SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , pixel_values=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase :List[str] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :list[list[int]] = []
UpperCamelCase :list[int] = []
UpperCamelCase :List[str] = 0
UpperCamelCase :Any = sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
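# Depth-first backtracking over index order; branches are pruned once the running sum exceeds max_sum or the remaining numbers can no longer reach it.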
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , ):
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
__snake_case = [3, 34, 4, 12, 5, 2]
__snake_case = 9
__snake_case = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 658 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
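# Projection module (this appears to be the unCLIP decoder's text-projection block) that maps CLIP image/text embeddings into extra time embeddings and additional encoder hidden-state tokens.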
class UpperCAmelCase_ ( lowercase, lowercase ):
"""simple docstring"""
@register_to_config
def __init__( self , *, SCREAMING_SNAKE_CASE_ = 4 , SCREAMING_SNAKE_CASE_ = 768 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
super().__init__()
UpperCamelCase :int = nn.Parameter(torch.zeros(SCREAMING_SNAKE_CASE_ ) )
# parameters for additional clip time embeddings
UpperCamelCase :List[str] = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# parameters for encoder hidden states
UpperCamelCase :Any = clip_extra_context_tokens
UpperCamelCase :Dict = nn.Linear(
SCREAMING_SNAKE_CASE_ , self.clip_extra_context_tokens * cross_attention_dim )
UpperCamelCase :List[Any] = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = nn.LayerNorm(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , *, SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCamelCase :Tuple = image_embeddings.shape[0]
UpperCamelCase :str = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCamelCase :Optional[int] = classifier_free_guidance_embeddings.expand(
SCREAMING_SNAKE_CASE_ , -1 )
UpperCamelCase :Dict = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCamelCase :List[Any] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCamelCase :int = self.embedding_proj(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = self.clip_image_embeddings_project_to_time_embeddings(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCamelCase :Tuple = self.clip_extra_context_tokens_proj(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = clip_extra_context_tokens.reshape(SCREAMING_SNAKE_CASE_ , -1 , self.clip_extra_context_tokens )
UpperCamelCase :List[str] = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCamelCase :Optional[Any] = self.encoder_hidden_states_proj(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = self.text_encoder_hidden_states_norm(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
UpperCamelCase :int = str(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = ''''''.join(sorted(SCREAMING_SNAKE_CASE__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
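# A number is bouncy when its digits are neither monotonically non-decreasing nor non-increasing, e.g. 155349.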
def _A ( SCREAMING_SNAKE_CASE__ : float = 99 ):
if not 0 < percent < 100:
raise ValueError('''solution() only accepts values from 0 to 100''' )
UpperCamelCase :Tuple = 0
UpperCamelCase :str = 1
while True:
if check_bouncy(SCREAMING_SNAKE_CASE__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 658 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
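# Materialise each learned pruning mask (magnitude / topK / sigmoied threshold / L0) and multiply it into the matching weight tensor to produce a standalone pruned checkpoint.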
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
UpperCamelCase :Optional[Any] = args.pruning_method
UpperCamelCase :str = args.threshold
UpperCamelCase :Optional[int] = args.model_name_or_path.rstrip('''/''' )
UpperCamelCase :Union[str, Any] = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
UpperCamelCase :str = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , '''pytorch_model.bin''' ) )
UpperCamelCase :List[str] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
UpperCamelCase :Union[str, Any] = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
UpperCamelCase :Optional[int] = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
UpperCamelCase :str = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
UpperCamelCase :str = MagnitudeBinarizer.apply(inputs=SCREAMING_SNAKE_CASE__ , threshold=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
UpperCamelCase :str = name[:-6]
UpperCamelCase :Union[str, Any] = model[F'''{prefix_}mask_scores''']
UpperCamelCase :int = TopKBinarizer.apply(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
UpperCamelCase :List[str] = name[:-6]
UpperCamelCase :Optional[Any] = model[F'''{prefix_}mask_scores''']
UpperCamelCase :Union[str, Any] = ThresholdBinarizer.apply(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
UpperCamelCase :Union[str, Any] = name[:-6]
UpperCamelCase :Tuple = model[F'''{prefix_}mask_scores''']
UpperCamelCase , UpperCamelCase :int = -0.1, 1.1
UpperCamelCase :Tuple = torch.sigmoid(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Any = s * (r - l) + l
UpperCamelCase :Optional[int] = s_bar.clamp(min=0.0 , max=1.0 )
UpperCamelCase :Dict = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
UpperCamelCase :Tuple = os.path.join(
os.path.dirname(SCREAMING_SNAKE_CASE__ ) , F'''bertarized_{os.path.basename(SCREAMING_SNAKE_CASE__ )}''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
shutil.copytree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
__snake_case = parser.parse_args()
main(args)
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :Union[str, Any] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
UpperCamelCase :str = hex_num[0] == '''-'''
if is_negative:
UpperCamelCase :Union[str, Any] = hex_num[1:]
try:
UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE__ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
UpperCamelCase :Dict = ''''''
while int_num > 0:
UpperCamelCase :Tuple = str(int_num % 2 ) + bin_str
int_num >>= 1
# guard the zero case: the loop above never runs for 0, leaving bin_str empty
UpperCamelCase :Tuple = bin_str or '''0'''
return int(('''-''' + bin_str) if is_negative else bin_str )
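# Example: "AC" -> 172 -> returns 10101100 (the binary digits encoded as a base-10 int).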
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
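# Configuration for VisualBERT: standard BERT hyper-parameters plus a visual embedding dimension and flags for the bypass-transformer variant.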
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Any ='visual_bert'
def __init__( self , SCREAMING_SNAKE_CASE_=3_0522 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-12 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ) -> int:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = vocab_size
UpperCamelCase :Tuple = max_position_embeddings
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :Any = visual_embedding_dim
UpperCamelCase :Tuple = num_hidden_layers
UpperCamelCase :Optional[int] = num_attention_heads
UpperCamelCase :Optional[Any] = intermediate_size
UpperCamelCase :Union[str, Any] = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :List[Any] = attention_probs_dropout_prob
UpperCamelCase :List[str] = initializer_range
UpperCamelCase :Optional[Any] = type_vocab_size
UpperCamelCase :Optional[Any] = layer_norm_eps
UpperCamelCase :Tuple = bypass_transformer
UpperCamelCase :Dict = special_visual_initialize
| 658 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase , UpperCamelCase :List[Any] = position
UpperCamelCase :Any = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCamelCase :Dict = []
for position in positions:
UpperCamelCase , UpperCamelCase :str = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE__ )
return permissible_positions
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
if is_complete(SCREAMING_SNAKE_CASE__ ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase , UpperCamelCase :Optional[int] = position
if board[y][x] == 0:
UpperCamelCase :Any = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , curr + 1 ):
return True
UpperCamelCase :Union[str, Any] = 0
return False
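# Try each square as the starting move; the recursive helper numbers cells 1..n*n along a valid knight path.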
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[Any] = [[0 for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )]
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Tuple = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , (i, j) , 1 ):
return board
UpperCamelCase :str = 0
UpperCamelCase :List[Any] = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
from typing import List
import numpy as np
def _A ( SCREAMING_SNAKE_CASE__ : dict ):
UpperCamelCase :Dict = {key: len(SCREAMING_SNAKE_CASE__ ) for key, value in gen_kwargs.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data source lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
UpperCamelCase :List[Any] = max(lists_lengths.values() , default=0 )
return max(1 , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Dict = []
for group_idx in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :List[str] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
UpperCamelCase :Optional[Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
UpperCamelCase :Union[str, Any] = range(SCREAMING_SNAKE_CASE__ , start + num_shards_to_add )
shards_indices_per_group.append(SCREAMING_SNAKE_CASE__ )
return shards_indices_per_group
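# e.g. _distribute_shards(num_shards=5, max_num_jobs=2) -> [range(0, 3), range(3, 5)]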
def _A ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :int = _number_of_shards_in_gen_kwargs(SCREAMING_SNAKE_CASE__ )
if num_shards == 1:
return [dict(SCREAMING_SNAKE_CASE__ )]
else:
UpperCamelCase :int = _distribute_shards(num_shards=SCREAMING_SNAKE_CASE__ , max_num_jobs=SCREAMING_SNAKE_CASE__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(SCREAMING_SNAKE_CASE__ ) )
]
def _A ( SCREAMING_SNAKE_CASE__ : List[dict] ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , SCREAMING_SNAKE_CASE__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _A ( SCREAMING_SNAKE_CASE__ : np.random.Generator , SCREAMING_SNAKE_CASE__ : dict ):
UpperCamelCase :Union[str, Any] = {len(SCREAMING_SNAKE_CASE__ ) for value in gen_kwargs.values() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
UpperCamelCase :Any = {}
for size in list_sizes:
UpperCamelCase :Any = list(range(SCREAMING_SNAKE_CASE__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
UpperCamelCase :Optional[Any] = dict(SCREAMING_SNAKE_CASE__ )
for key, value in shuffled_kwargs.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Optional[Any] = [value[i] for i in indices_per_size[len(SCREAMING_SNAKE_CASE__ )]]
return shuffled_kwargs
| 658 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
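# Tests for GenerationConfig: save/load round trips, defaults vs. model-config overrides, kwarg updates, and Hub pushes for user and org repos.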
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :int = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained('''gpt2''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[str] = GenerationConfig()
UpperCamelCase :List[str] = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
UpperCamelCase :Dict = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = generation_config.update(**SCREAMING_SNAKE_CASE_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = GenerationConfig()
UpperCamelCase :Tuple = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(default_config.num_beams , 1 )
UpperCamelCase :Tuple = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> Optional[Any]:
UpperCamelCase :List[str] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCAmelCase ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
UpperCamelCase :List[Any] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
| 658 | 1 |
from __future__ import annotations
from math import pi
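# Solves the inductive reactance relation X_L = 2*pi*f*L for whichever quantity is passed as 0.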
def _A ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
if length <= 0 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(SCREAMING_SNAKE_CASE__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
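# Verifies that IPNDMScheduler reproduces identical steps after a save_config / from_pretrained round trip, including its stored past residuals.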
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Any =(IPNDMScheduler,)
UpperCamelCase_ : Union[str, Any] =(('num_inference_steps', 50),)
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase :int = {'''num_train_timesteps''': 1000}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=0 , **SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :List[Any] = dict(self.forward_default_kwargs )
UpperCamelCase :Optional[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = self.dummy_sample
UpperCamelCase :List[Any] = 0.1 * sample
UpperCamelCase :Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase :Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
UpperCamelCase :int = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase :Any = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
UpperCamelCase :str = dummy_past_residuals[:]
UpperCamelCase :Tuple = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Optional[int] = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase :List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self ) -> List[Any]:
pass
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=0 , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Tuple = dict(self.forward_default_kwargs )
UpperCamelCase :Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = self.dummy_sample
UpperCamelCase :Tuple = 0.1 * sample
UpperCamelCase :str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase :Optional[int] = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase :Optional[int] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase :Optional[int] = dummy_past_residuals[:]
UpperCamelCase :Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Union[str, Any] = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase :List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
return sample
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = dict(self.forward_default_kwargs )
UpperCamelCase :str = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = self.dummy_sample
UpperCamelCase :Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
UpperCamelCase :str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase :str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase :int = dummy_past_residuals[:]
UpperCamelCase :List[Any] = scheduler.timesteps[5]
UpperCamelCase :str = scheduler.timesteps[6]
UpperCamelCase :str = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase :Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ) -> Tuple:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ , time_step=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE_ , time_step=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 658 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool ( PipelineTool ):
"""simple docstring"""
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode ( self , text , src_lang , tgt_lang ) -> str:
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='''pt''' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward ( self , inputs ) -> Optional[Any]:
        return self.model.generate(**inputs )
    def decode ( self , outputs ) -> int:
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
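# A minimal usage sketch (illustrative input text; the NLLB checkpoint is
# downloaded on first use):
#
#   translator = TranslationTool()
#   print(translator('''Hello, how are you?''' , src_lang='''English''' , tgt_lang='''French''' ))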
| 658 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def read ( dataset : datasets.Dataset , length : int ):
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch ( dataset : datasets.Dataset , length : int , batch_size : int ):
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted ( dataset : datasets.Dataset , length : int , type : str ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch ( dataset : datasets.Dataset , length : int , batch_size : int , type : str ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating ( ):
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
    functions_shuffled = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'''list''': (100,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print('''shuffling dataset''' )
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
for func, kwargs in functions_shuffled:
            print('''shuffled ''' , func.__name__ , str(kwargs ) )
            times['''shuffled ''' + func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
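    # Results are written to results/<script_name>.json beside this file (see
    # RESULTS_FILE_PATH above); torch and tensorflow must both be installed,
    # since the formatted-read benchmarks above iterate in both frameworks.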
| 658 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search ( left : int , right : int , array : list[int] , target : int ):
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search ( array : list[int] , target : int ):
    left = 0
    right = len(array )
while left <= right:
if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
else:
return -1
def rec_ternary_search ( left : int , right : int , array : list[int] , target : int ):
if left < right:
if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
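    # Example session (illustrative): for the sorted list 1, 2, 3, 4, 5 and
    # target 4, both searches below report index 3.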
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f'''Iterative search: {target} found at positions: {resulta}''')
        print(f'''Recursive search: {target} found at positions: {resultb}''')
else:
print("""Not found""")
| 658 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config ( model_name : str ):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('''Model not supported''' )
    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 527
        filename = '''audioset-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key ( name : str ):
    if "module.v" in name:
        name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "dist_token" in name:
        name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
    return name
def convert_state_dict ( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
if "weight" in key:
UpperCamelCase :List[str] = val[:dim, :]
UpperCamelCase :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase :Optional[Any] = val[-dim:, :]
else:
UpperCamelCase :Dict = val[:dim]
UpperCamelCase :Optional[int] = val[dim : dim * 2]
UpperCamelCase :List[Any] = val[-dim:]
else:
UpperCamelCase :Union[str, Any] = val
return orig_state_dict
def remove_keys ( state_dict ):
    ignore_keys = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if '''speech-commands''' not in model_name else -6.845978
    std = 4.5689974 if '''speech-commands''' not in model_name else 5.5654526
    max_length = 1024 if '''speech-commands''' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16000 , return_tensors='''pt''' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCamelCase :Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCamelCase :Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCamelCase :str = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCamelCase :List[str] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCamelCase :Dict = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCamelCase :List[str] = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCamelCase :Optional[int] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCamelCase :List[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
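    # Typical invocation (illustrative; the script filename and output path are
    # placeholders):
    #   python <this_script>.py --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted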
| 658 |
def naive_cut_rod_recursive ( n : int , prices : list ):
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float('''-inf''' )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue
def top_down_cut_rod ( n : int , prices : list ):
    _enforce_args(n , prices )
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive ( n : int , prices : list , max_rev : list ):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('''-inf''' )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
    return max_rev[n]
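# A worked instance of the cut-rod recurrence r[n] = max over 1 <= i <= n of
# (prices[i - 1] + r[n - i]), with illustrative prices = [1, 5, 8]:
# r[1] = 1, r[2] = max(1 + 1, 5 + 0) = 5, r[3] = max(1 + 5, 5 + 1, 8 + 0) = 8,
# i.e. for length 3 it is best not to cut at all.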
def bottom_up_cut_rod ( n : int , prices : list ):
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args ( n : int , prices : list ):
    if n < 0:
        msg = F'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            '''Each integral piece of rod must have a corresponding price. '''
            F'''Got n = {n} but length of prices = {len(prices )}'''
        )
        raise ValueError(msg )
def main ( ):
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 658 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple ( max_perimeter : int ):
    triplets :typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def solution ( max_perimeter : int = 1000 ):
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
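# With the default limit of 1000 this is Project Euler problem 39; the perimeter
# with the most integer right-triangle solutions is 840.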
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
    model_type = 'focalnet'
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> Dict:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 658 | 1 |
def count_inversions_bf ( arr ):
    num_inversions = 0
    n = len(arr )
for i in range(n - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def count_inversions_recursive ( arr ):
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a , inversion_p = count_inversions_recursive(p )
    b , inversions_q = count_inversions_recursive(q )
    c , cross_inversions = _count_cross_inversions(a , b )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions ( p , q ):
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
return r, num_inversion
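# _count_cross_inversions is the merge step of merge sort with a counter bolted
# on, which is what gives the recursive count its O(n log n) running time versus
# the O(n^2) brute force above.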
def main ( ):
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , num_inversions_bf )
# testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )
if __name__ == "__main__":
main()
| 658 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :Union[str, Any] = parent
UpperCamelCase :Tuple = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Any = patch_size
UpperCamelCase :List[str] = num_channels
UpperCamelCase :int = is_training
UpperCamelCase :str = use_labels
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :List[Any] = backbone_out_indices
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Tuple = intermediate_size
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :List[str] = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :int = backbone_featmap_shape
UpperCamelCase :Any = scope
UpperCamelCase :int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Dict = (image_size // patch_size) ** 2
UpperCamelCase :List[str] = num_patches + 1
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :List[str] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :Optional[Any] = self.num_labels
UpperCamelCase :Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :Optional[int] = self.num_labels
UpperCamelCase :int = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase ( self ) -> Dict:
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
UpperCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Any = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = False
UpperCamelCase :List[Any] = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Any:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img ( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
| 658 | 1 |
from __future__ import annotations
def depth_first_search ( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ):
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution ( n : int ):
    boards :list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
            print(column )
print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 658 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Union[str, Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
UpperCamelCase :Any = 128
elif "12-12" in model_name:
UpperCamelCase :Union[str, Any] = 12
UpperCamelCase :Any = 12
elif "14-14" in model_name:
UpperCamelCase :Optional[int] = 14
UpperCamelCase :List[str] = 14
elif "16-16" in model_name:
UpperCamelCase :List[Any] = 16
UpperCamelCase :Optional[Any] = 16
else:
raise ValueError('''Model not supported''' )
UpperCamelCase :Tuple = '''huggingface/label-files'''
if "speech-commands" in model_name:
UpperCamelCase :Optional[Any] = 35
UpperCamelCase :List[Any] = '''speech-commands-v2-id2label.json'''
else:
UpperCamelCase :Optional[int] = 527
UpperCamelCase :List[Any] = '''audioset-id2label.json'''
UpperCamelCase :Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase :List[str] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
UpperCamelCase :List[Any] = idalabel
UpperCamelCase :List[Any] = {v: k for k, v in idalabel.items()}
return config
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if "module.v" in name:
UpperCamelCase :Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
UpperCamelCase :int = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
UpperCamelCase :Tuple = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
UpperCamelCase :Optional[int] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase :str = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
UpperCamelCase :Any = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCamelCase :str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCamelCase :Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase :Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase :List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
UpperCamelCase :Union[str, Any] = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
UpperCamelCase :int = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
UpperCamelCase :Tuple = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
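# Illustration (added; not part of the original script): the substitutions above map
# "module.v.blocks.0.attn.proj.weight" to
# "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight".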
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
for key in orig_state_dict.copy().keys():
UpperCamelCase :Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
UpperCamelCase :Any = key.split('''.''' )
UpperCamelCase :str = int(key_split[3] )
UpperCamelCase :Union[str, Any] = config.hidden_size
if "weight" in key:
UpperCamelCase :List[str] = val[:dim, :]
UpperCamelCase :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase :Optional[Any] = val[-dim:, :]
else:
UpperCamelCase :Dict = val[:dim]
UpperCamelCase :Optional[int] = val[dim : dim * 2]
UpperCamelCase :List[Any] = val[-dim:]
else:
UpperCamelCase :Union[str, Any] = val
return orig_state_dict
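# Illustration (added, assuming the usual timm-style fused layout): a "qkv" weight of
# shape (3 * hidden_size, hidden_size) is sliced above into equal query / key / value
# blocks, and the matching (3 * hidden_size,) bias is split the same way.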
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[str] = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=False ):
UpperCamelCase :Optional[Any] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
UpperCamelCase :Optional[int] = model_name_to_url[model_name]
UpperCamelCase :Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE__ )
# rename some keys
UpperCamelCase :Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load 🤗 model
UpperCamelCase :int = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
UpperCamelCase :Union[str, Any] = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
UpperCamelCase :List[str] = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
UpperCamelCase :Optional[Any] = 1024 if '''speech-commands''' not in model_name else 128
UpperCamelCase :int = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
if "speech-commands" in model_name:
UpperCamelCase :Dict = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
UpperCamelCase :List[Any] = dataset[0]['''audio''']['''array''']
else:
UpperCamelCase :List[Any] = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
UpperCamelCase , UpperCamelCase :Dict = torchaudio.load(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = waveform.squeeze().numpy()
UpperCamelCase :Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=16000 , return_tensors='''pt''' )
# forward pass
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCamelCase :Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCamelCase :Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCamelCase :str = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCamelCase :List[str] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCamelCase :Dict = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCamelCase :List[str] = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCamelCase :Optional[int] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCamelCase :List[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : int = 1000000 ):
UpperCamelCase :Optional[Any] = 1
UpperCamelCase :str = 1
UpperCamelCase :Dict = {1: 1}
for inputa in range(2 , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :str = 0
UpperCamelCase :List[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
UpperCamelCase :str = (3 * number) + 1
counter += 1
if inputa not in counters:
UpperCamelCase :Any = counter
if counter > pre_counter:
UpperCamelCase :Optional[Any] = inputa
UpperCamelCase :int = counter
return largest_number
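# Worked example (added for illustration of the intended Project Euler 14 behaviour):
# the memoised chain for 27 has 112 terms (27 -> 82 -> 41 -> ... -> 1), the longest
# Collatz chain among starting values below 30, so an input of 30 yields 27 from the
# function (called `solution` in the main guard below).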
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
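# Note (added for illustration): _LazyModule defers the heavy imports declared above,
# so e.g. `from transformers.models.llama import LlamaConfig` only triggers the import
# of configuration_llama on first attribute access, keeping `import transformers` fast.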
| 658 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> List[Any]:
UpperCamelCase :List[str] = {}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ) -> Optional[Any]:
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCamelCase :List[str] = [[w, v]]
if not self.graph.get(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :str = []
def UpperCAmelCase ( self ) -> Optional[Any]:
return list(self.graph )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> Union[str, Any]:
if s == d:
return []
UpperCamelCase :Optional[Any] = []
UpperCamelCase :Union[str, Any] = []
if s == -2:
UpperCamelCase :Tuple = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Any = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-1 ) -> Optional[int]:
if c == -1:
UpperCamelCase :List[str] = floor(random() * 1_0000 ) + 10
for i in range(SCREAMING_SNAKE_CASE_ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase :str = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> Tuple:
UpperCamelCase :Union[str, Any] = deque()
UpperCamelCase :Optional[Any] = []
if s == -2:
UpperCamelCase :Optional[int] = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
while d:
UpperCamelCase :List[str] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Optional[Any] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
return len(self.graph[u] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> Any:
UpperCamelCase :str = []
UpperCamelCase :Optional[Any] = []
if s == -2:
UpperCamelCase :Dict = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = s
UpperCamelCase :List[str] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[Any] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Any = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return sorted_nodes
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :int = []
UpperCamelCase :int = []
UpperCamelCase :Union[str, Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = -2
UpperCamelCase :List[str] = []
UpperCamelCase :Optional[Any] = s
UpperCamelCase :Optional[Any] = False
UpperCamelCase :Any = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :List[str] = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :List[str] = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Union[str, Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = s
UpperCamelCase :List[Any] = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :int = []
UpperCamelCase :Optional[Any] = []
UpperCamelCase :Union[str, Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = -2
UpperCamelCase :Any = []
UpperCamelCase :List[str] = s
UpperCamelCase :Dict = False
UpperCamelCase :List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Dict = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :Dict = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Any = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Optional[Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = s
UpperCamelCase :Optional[Any] = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> int:
UpperCamelCase :int = time()
self.dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = time()
return end - begin
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> Dict:
UpperCamelCase :Optional[int] = time()
self.bfs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = time()
return end - begin
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
UpperCamelCase :Optional[Any] = {}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ) -> Optional[Any]:
        # check if u already exists in the graph
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCamelCase :int = [[w, v]]
# add the other way
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
UpperCamelCase :Dict = [[w, u]]
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE_ )
# the other way round
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> Any:
if s == d:
return []
UpperCamelCase :Any = []
UpperCamelCase :List[Any] = []
if s == -2:
UpperCamelCase :List[Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Any = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Optional[int] = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-1 ) -> List[str]:
if c == -1:
UpperCamelCase :Union[str, Any] = floor(random() * 1_0000 ) + 10
for i in range(SCREAMING_SNAKE_CASE_ ):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase :Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> List[Any]:
UpperCamelCase :Any = deque()
UpperCamelCase :List[str] = []
if s == -2:
UpperCamelCase :Union[str, Any] = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
while d:
UpperCamelCase :Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return len(self.graph[u] )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = []
UpperCamelCase :Optional[int] = []
UpperCamelCase :List[str] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = -2
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :List[Any] = s
UpperCamelCase :List[str] = False
UpperCamelCase :List[str] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Optional[int] = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :Dict = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :Tuple = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :Tuple = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = s
UpperCamelCase :Optional[Any] = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :int = []
UpperCamelCase :Tuple = []
UpperCamelCase :List[str] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = -2
UpperCamelCase :int = []
UpperCamelCase :Tuple = s
UpperCamelCase :Optional[int] = False
UpperCamelCase :Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase :Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase :Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase :Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase :Optional[int] = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase :int = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase :List[Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = s
UpperCamelCase :Any = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
def UpperCAmelCase ( self ) -> List[str]:
return list(self.graph )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ) -> Any:
UpperCamelCase :int = time()
self.dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = time()
return end - begin
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_=-2 ) -> List[Any]:
UpperCamelCase :List[Any] = time()
self.bfs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = time()
return end - begin
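# Compact standalone sketch of the iterative DFS idea the two classes above implement,
# with readable names (added for illustration; the original row keeps its obfuscated
# identifiers, whose methods shadow one another and are not directly callable):
def iterative_dfs(graph, start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push unvisited neighbours; reversed() preserves left-to-right order
            stack.extend(n for n in reversed(graph.get(node, [])) if n not in visited)
    return visited

# iterative_dfs({0: [1, 2], 1: [3], 2: [], 3: []}, 0) -> [0, 1, 3, 2]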
| 658 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :Tuple = 0.0
for i, j in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
n_correct += 1.0 if math_equivalence.is_equiv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else 0.0
UpperCamelCase :int = n_correct / len(SCREAMING_SNAKE_CASE_ )
return {
"accuracy": accuracy,
}
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : list ):
UpperCamelCase :str = len(SCREAMING_SNAKE_CASE__ )
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Optional[Any] = collection[i]
UpperCamelCase :Dict = 0
UpperCamelCase :int = i - 1
while low <= high:
UpperCamelCase :Tuple = (low + high) // 2
if val < collection[mid]:
UpperCamelCase :Tuple = mid - 1
else:
UpperCamelCase :Any = mid + 1
for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ):
UpperCamelCase :List[Any] = collection[j - 1]
UpperCamelCase :List[Any] = val
return collection
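# Worked trace (added for illustration): sorting [5, 2, 4, 1] inserts each element
# into the sorted prefix at the index found by the binary search above:
#   i=1: insert 2 before 5          -> [2, 5, 4, 1]
#   i=2: insert 4 between 2 and 5   -> [2, 4, 5, 1]
#   i=3: insert 1 at the front      -> [1, 2, 4, 5]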
if __name__ == "__main__":
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
| 658 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def _A ( ):
UpperCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase :Dict = parser.parse_args()
return args.f
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]="eval" ):
UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
raise ValueError(F'''can\'t find {path}''' )
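# Example (added for illustration; /tmp/run is a hypothetical path): with
# `--output_dir /tmp/run`, an evaluation pass writes /tmp/run/eval_results.json,
# so the helper above returns that parsed dict, e.g. {"eval_accuracy": 0.75, ...}.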
__snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_glue.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_clm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Dict = self.get_auto_remove_tmp_dir()
UpperCamelCase :Any = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_summarization_flax.main()
UpperCamelCase :str = get_results(SCREAMING_SNAKE_CASE_ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :List[str] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_mlm_flax.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :int = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_ta_mlm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU runs
UpperCamelCase :Tuple = 7 if get_gpu_count() > 1 else 2
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[int] = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_ner.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Dict = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_qa.main()
UpperCamelCase :int = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 658 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__snake_case = numpy.array([0, 0])
__snake_case = numpy.array([0.5, 0.8_6_6_0_2_5_4])
__snake_case = numpy.array([1, 0])
__snake_case = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _A ( SCREAMING_SNAKE_CASE__ : list[numpy.ndarray] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Union[str, Any] = initial_vectors
for _ in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Optional[int] = iteration_step(SCREAMING_SNAKE_CASE__ )
return vectors
def _A ( SCREAMING_SNAKE_CASE__ : list[numpy.ndarray] ):
UpperCamelCase :List[str] = []
for i, start_vector in enumerate(vectors[:-1] ):
UpperCamelCase :List[str] = vectors[i + 1]
new_vectors.append(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
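# Quick check (added for illustration): each iteration replaces every edge with four,
# so after n steps the border has 3 * 4**n edges and 3 * 4**n + 1 stored vectors:
# one iteration yields 13 vectors and the 5 iterations run below yield 3073.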
def _A ( SCREAMING_SNAKE_CASE__ : numpy.ndarray , SCREAMING_SNAKE_CASE__ : float ):
UpperCamelCase :List[Any] = numpy.radians(SCREAMING_SNAKE_CASE__ )
UpperCamelCase , UpperCamelCase :Tuple = numpy.cos(SCREAMING_SNAKE_CASE__ ), numpy.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = numpy.array(((c, -s), (s, c)) )
return numpy.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : list[numpy.ndarray] ):
UpperCamelCase :str = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
UpperCamelCase , UpperCamelCase :Tuple = zip(*SCREAMING_SNAKE_CASE__ )
plt.plot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 658 |
from __future__ import annotations
from collections.abc import Callable
def _A ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 100 , ):
UpperCamelCase :Optional[Any] = x_start
UpperCamelCase :Any = fnc(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = 0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
UpperCamelCase :Any = (x_end - x_start) / steps + xa
UpperCamelCase :Dict = fnc(SCREAMING_SNAKE_CASE__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCamelCase :Optional[int] = xa
UpperCamelCase :List[str] = fxa
return area
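# Sanity note (added for illustration): the trapezoid rule is exact for linear
# integrands, so integrating f(x) = x over [0, 1] yields exactly 0.5 for any step
# count, while for f(x) = x**3 + x**2 below the estimate converges to 250/3 ~ 83.33.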
if __name__ == "__main__":
def _A ( SCREAMING_SNAKE_CASE__ : int ):
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
__snake_case = 10
while i <= 10_00_00:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 658 | 1 |
import string
def _A ( SCREAMING_SNAKE_CASE__ : str ):
for key in range(len(string.ascii_uppercase ) ):
UpperCamelCase :int = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCamelCase :str = string.ascii_uppercase.find(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = num - key
if num < 0:
UpperCamelCase :Tuple = num + len(string.ascii_uppercase )
UpperCamelCase :List[Any] = translated + string.ascii_uppercase[num]
else:
UpperCamelCase :List[str] = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
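# Worked example (added for illustration): for the ciphertext "KHOOR", key 3 maps
# K->H, H->E, O->L, O->L, R->O, so the loop above prints
# "Decryption using Key #3: HELLO" among its 26 candidate lines.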
def _A ( ):
UpperCamelCase :str = input('''Encrypted message: ''' )
UpperCamelCase :Union[str, Any] = message.upper()
decrypt(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(CMStochasticIterativeScheduler,)
UpperCamelCase_ : Any =10
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = 10
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Dict = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps[0]
UpperCamelCase :Union[str, Any] = scheduler.timesteps[1]
UpperCamelCase :str = self.dummy_sample
UpperCamelCase :List[str] = 0.1 * sample
UpperCamelCase :List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :str = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ) -> List[str]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[Any] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps
UpperCamelCase :Union[str, Any] = torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = self.dummy_model()
UpperCamelCase :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase :List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = pred_prev_sample
UpperCamelCase :Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Dict = self.scheduler_classes[0]
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = scheduler.timesteps
UpperCamelCase :int = torch.manual_seed(0 )
UpperCamelCase :str = self.dummy_model()
UpperCamelCase :List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase :List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :int = pred_prev_sample
UpperCamelCase :Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :Tuple = self.get_scheduler_config()
UpperCamelCase :List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = [39, 30, 12, 1, 0]
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[int] = self.scheduler_classes[0]
UpperCamelCase :List[str] = self.get_scheduler_config()
UpperCamelCase :Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : List[str] ):
UpperCamelCase :Dict = [0] * len(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = []
UpperCamelCase :Dict = []
UpperCamelCase :str = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if indegree[i] == 0:
queue.append(SCREAMING_SNAKE_CASE__ )
while queue:
UpperCamelCase :Any = queue.pop(0 )
cnt += 1
topo.append(SCREAMING_SNAKE_CASE__ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(SCREAMING_SNAKE_CASE__ )
if cnt != len(SCREAMING_SNAKE_CASE__ ):
print('''Cycle exists''' )
else:
print(SCREAMING_SNAKE_CASE__ )
# Adjacency List of Graph
__snake_case = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
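# Standalone restatement of Kahn's algorithm with readable names (added for
# illustration; the function above keeps the dataset's obfuscated identifiers):
def kahn_topological_order(adj):
    indegree = {v: 0 for v in adj}
    for targets in adj.values():
        for t in targets:
            indegree[t] += 1
    order = [v for v, d in indegree.items() if d == 0]
    for v in order:  # `order` doubles as the processing queue
        for t in adj[v]:
            indegree[t] -= 1
            if indegree[t] == 0:
                order.append(t)
    return order if len(order) == len(adj) else None  # None signals a cycle

# kahn_topological_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []})
# -> [0, 1, 2, 3, 4, 5]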
| 658 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
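# Worked example (added for illustration): a box (10, 20, 110, 220) on a
# 1000 x 2000 page normalises to [10, 10, 110, 110] on the fixed 0-1000 grid
# that LayoutLM-style models expect.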
def _A ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
UpperCamelCase :int = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
UpperCamelCase :Tuple = to_pil_image(SCREAMING_SNAKE_CASE__ )
UpperCamelCase , UpperCamelCase :str = pil_image.size
UpperCamelCase :List[str] = pytesseract.image_to_data(SCREAMING_SNAKE_CASE__ , lang=SCREAMING_SNAKE_CASE__ , output_type='''dict''' , config=SCREAMING_SNAKE_CASE__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
UpperCamelCase :int = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE__ ) if not word.strip()]
UpperCamelCase :List[str] = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
UpperCamelCase :Any = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
UpperCamelCase :List[str] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
UpperCamelCase :List[str] = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
UpperCamelCase :Any = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase :List[str] = []
for x, y, w, h in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :List[Any] = [x, y, x + w, y + h]
actual_boxes.append(SCREAMING_SNAKE_CASE__ )
# finally, normalize the bounding boxes
UpperCamelCase :Optional[int] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] =['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "" , **SCREAMING_SNAKE_CASE_ , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = size if size is not None else {'''height''': 224, '''width''': 224}
UpperCamelCase :List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = do_resize
UpperCamelCase :Union[str, Any] = size
UpperCamelCase :Any = resample
UpperCamelCase :Dict = apply_ocr
UpperCamelCase :List[str] = ocr_lang
UpperCamelCase :str = tesseract_config
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
UpperCamelCase :Any = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCamelCase :Optional[int] = (size['''height'''], size['''width'''])
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ) -> PIL.Image.Image:
UpperCamelCase :List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase :Optional[int] = size if size is not None else self.size
UpperCamelCase :Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = resample if resample is not None else self.resample
UpperCamelCase :str = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase :Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase :Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase :Tuple = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase :Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
UpperCamelCase :Any = []
UpperCamelCase :Optional[Any] = []
for image in images:
UpperCamelCase , UpperCamelCase :List[str] = apply_tesseract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
words_batch.append(SCREAMING_SNAKE_CASE_ )
boxes_batch.append(SCREAMING_SNAKE_CASE_ )
if do_resize:
UpperCamelCase :Any = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
UpperCamelCase :Dict = [flip_channel_order(SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase :List[str] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase :Tuple = BatchFeature(data={'''pixel_values''': images} , tensor_type=SCREAMING_SNAKE_CASE_ )
if apply_ocr:
UpperCamelCase :Optional[Any] = words_batch
UpperCamelCase :Optional[int] = boxes_batch
return data
| 658 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return None
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return None
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any =[
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> int:
from transformers import BertModel
UpperCamelCase :int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
vocab_file.flush()
UpperCamelCase :Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase :Union[str, Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ )
@require_tf
@slow
def UpperCAmelCase ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :Tuple = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = quantize(Path(SCREAMING_SNAKE_CASE_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :str = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = quantize(SCREAMING_SNAKE_CASE_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
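        # Helper: export the model to ONNX under a temporary directory and return the generated path; any exception fails the test.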
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase :Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' )
                # Remove folder if it exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[str]:
from transformers import BertModel
UpperCamelCase :List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[Any]:
from transformers import TFBertModel
UpperCamelCase :Optional[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Tuple = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
        # Parameters should be reordered according to their respective places in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Should have exactly one arg (only the args before the unprovided "some_other_args" are kept)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :str = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 658 | 1 |
def _A ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 658 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Union[str, Any] = tempfile.mkdtemp()
UpperCamelCase :List[str] = 5
# Realm tok
UpperCamelCase :List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase :Dict = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase :Any = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Tuple = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=SCREAMING_SNAKE_CASE_ , )
return block_records
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[Any] = self.get_config()
UpperCamelCase :str = self.get_dummy_retriever()
UpperCamelCase :int = retriever.tokenizer
UpperCamelCase :Optional[Any] = np.array([0, 3] , dtype='''long''' )
UpperCamelCase :Optional[Any] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Tuple = tokenizer(
['''the fourth'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Optional[Any] = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = self.get_config()
UpperCamelCase :Union[str, Any] = self.get_dummy_retriever()
UpperCamelCase :Dict = retriever.tokenizer
UpperCamelCase :str = np.array([0, 3, 5] , dtype='''long''' )
UpperCamelCase :List[str] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Optional[Any] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Any = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :str = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
UpperCamelCase :List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
UpperCamelCase :Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCamelCase :List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 658 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] ):
UpperCamelCase :List[Any] = int(SCREAMING_SNAKE_CASE__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = t // 3600, (t // 60) % 60, t % 60
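    # e.g. t=3661 -> "1:01:01"; t=75 -> "01:15" (the hours field is omitted when zero)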
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int=300 ):
# docstyle-ignore
return F'''
<div>
{prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
'''
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
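    # Render rows as an HTML table: the first row is treated as the header, and float cells are formatted to 6 decimal places.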
UpperCamelCase :str = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
UpperCamelCase :List[Any] = F'''{elt:.6f}''' if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else str(SCREAMING_SNAKE_CASE__ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ : int =5
UpperCamelCase_ : List[str] =0.2
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 300 , ) -> List[Any]:
UpperCamelCase :List[Any] = total
UpperCamelCase :Any = '''''' if prefix is None else prefix
UpperCamelCase :Optional[Any] = leave
UpperCamelCase :Union[str, Any] = parent
UpperCamelCase :Dict = width
UpperCamelCase :str = None
UpperCamelCase :Tuple = None
UpperCamelCase :Optional[int] = None
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None ) -> Dict:
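        # Throttled redraw: after the warmup calls, only repaint once value has advanced by self.wait_for items
        # (sized to span roughly self.update_every seconds of work) or when force_update is set.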
UpperCamelCase :Optional[Any] = value
if comment is not None:
UpperCamelCase :Dict = comment
if self.last_value is None:
UpperCamelCase :str = time.time()
UpperCamelCase :Dict = value
UpperCamelCase :Optional[int] = None
UpperCamelCase :Optional[int] = self.warmup
UpperCamelCase :int = 1
self.update_bar(SCREAMING_SNAKE_CASE_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
UpperCamelCase :Any = time.time()
UpperCamelCase :str = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
UpperCamelCase :Optional[int] = self.elapsed_time / (value - self.start_value)
else:
UpperCamelCase :Dict = None
if value >= self.total:
UpperCamelCase :int = self.total
UpperCamelCase :int = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
UpperCamelCase :int = self.average_time_per_item * (self.total - value)
self.update_bar(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = value
UpperCamelCase :Any = current_time
if self.average_time_per_item is None:
UpperCamelCase :int = 1
else:
UpperCamelCase :Tuple = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[Any]:
UpperCamelCase :Any = ''' ''' * (len(str(self.total ) ) - len(str(SCREAMING_SNAKE_CASE_ ) )) + str(SCREAMING_SNAKE_CASE_ )
if self.elapsed_time is None:
UpperCamelCase :Optional[int] = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
UpperCamelCase :Tuple = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
UpperCamelCase :List[str] = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Tuple = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
UpperCamelCase :Dict = disp.display(disp.HTML(self.html_code ) , display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self ) -> Any:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = None if column_names is None else [column_names]
UpperCamelCase :Union[str, Any] = None
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
UpperCamelCase :List[str] = disp.display(disp.HTML(self.html_code ) , display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
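        # Append one row of metrics to the inner table, extending the header with any new keys seen on the first data row.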
if self.inner_table is None:
UpperCamelCase :Union[str, Any] = [list(values.keys() ), list(values.values() )]
else:
UpperCamelCase :Dict = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=300 ) -> Dict:
UpperCamelCase :Optional[int] = NotebookProgressBar(SCREAMING_SNAKE_CASE_ , prefix=SCREAMING_SNAKE_CASE_ , parent=self , width=SCREAMING_SNAKE_CASE_ )
return self.child_bar
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :str = None
self.display()
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self ) -> Any:
UpperCamelCase :Union[str, Any] = None
UpperCamelCase :int = None
UpperCamelCase :List[str] = False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase :Union[str, Any] = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
UpperCamelCase :Tuple = 0
UpperCamelCase :Dict = 0
UpperCamelCase :Any = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
UpperCamelCase :int = NotebookTrainingTracker(state.max_steps , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase :Dict = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
UpperCamelCase :Tuple = False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Dict:
if not has_length(SCREAMING_SNAKE_CASE_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
UpperCamelCase :Optional[Any] = self.training_tracker.add_child(len(SCREAMING_SNAKE_CASE_ ) )
else:
UpperCamelCase :Tuple = NotebookProgressBar(len(SCREAMING_SNAKE_CASE_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
UpperCamelCase :Tuple = None
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Any:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
UpperCamelCase :Optional[int] = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
UpperCamelCase :Any = state.global_step
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if self.training_tracker is not None:
UpperCamelCase :Union[str, Any] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
UpperCamelCase :List[str] = log['''loss''']
break
if self.first_column == "Epoch":
UpperCamelCase :str = int(state.epoch )
else:
UpperCamelCase :Optional[Any] = state.global_step
UpperCamelCase :Dict = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
UpperCamelCase :str = re.sub(r'''\_loss$''' , '''''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = metrics.pop('''total_flos''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = metrics.pop('''epoch''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = metrics.pop(F'''{metric_key_prefix}_runtime''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , SCREAMING_SNAKE_CASE_ )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
UpperCamelCase :Any = v
else:
UpperCamelCase :List[str] = k.split('''_''' )
UpperCamelCase :Union[str, Any] = ''' '''.join([part.capitalize() for part in splits[1:]] )
UpperCamelCase :int = v
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
self.training_tracker.remove_child()
UpperCamelCase :Union[str, Any] = None
# Evaluation takes a long time so we should force the next update.
UpperCamelCase :Tuple = True
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = None
| 658 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]:
UpperCamelCase :int = parent
UpperCamelCase :List[Any] = batch_size
UpperCamelCase :List[Any] = patch_size
UpperCamelCase :Optional[int] = max_length
UpperCamelCase :Union[str, Any] = num_mel_bins
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :Dict = use_labels
UpperCamelCase :Dict = hidden_size
UpperCamelCase :Optional[int] = num_hidden_layers
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Optional[int] = intermediate_size
UpperCamelCase :List[str] = hidden_act
UpperCamelCase :List[str] = hidden_dropout_prob
UpperCamelCase :List[Any] = attention_probs_dropout_prob
UpperCamelCase :str = type_sequence_label_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :Union[str, Any] = scope
UpperCamelCase :List[Any] = frequency_stride
UpperCamelCase :Tuple = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension
UpperCamelCase :Optional[int] = num_patches + 2
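        # with the defaults above (num_mel_bins=16, max_length=24, patch_size=2, strides=2):
        # (16 - 2) // 2 + 1 = 8 frequency positions, (24 - 2) // 2 + 1 = 12 time positions -> 96 patches, seq_length 98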
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :str = self.get_config()
return config, input_values, labels
def UpperCAmelCase ( self ) -> List[Any]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Any =(
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[Any] =False
UpperCamelCase_ : Dict =False
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = ASTModelTester(self )
UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
UpperCamelCase :Any = [*signature.parameters.keys()]
UpperCamelCase :Optional[int] = ['''input_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Any = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Tuple:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = self.default_feature_extractor
UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = self.default_feature_extractor
UpperCamelCase , UpperCamelCase :Dict = prepare_audio()
UpperCamelCase :Dict = audio.squeeze().numpy()
UpperCamelCase :int = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase :List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
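# Lazy import structure: the torch/TF submodules below are only imported on first attribute access via _LazyModule.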
__snake_case = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
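    # Normalize the input (PIL image, numpy array, tensor, or a list of them) to a float tensor in [-1, 1], NCHW layout, resized to (w, h).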
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
UpperCamelCase :Dict = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase :Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
UpperCamelCase :int = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
UpperCamelCase :Optional[Any] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_55.0
UpperCamelCase :List[str] = image.transpose(0 , 3 , 1 , 2 )
UpperCamelCase :Tuple = 2.0 * image - 1.0
UpperCamelCase :Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
return image
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=0.99_95 ):
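    # Spherical linear interpolation (slerp) between two vectors; falls back to plain linear
    # interpolation when they are nearly collinear (|dot| above the threshold, default 0.9995).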
if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
UpperCamelCase :int = True
UpperCamelCase :Dict = va.device
UpperCamelCase :List[Any] = va.cpu().numpy()
UpperCamelCase :str = va.cpu().numpy()
UpperCamelCase :Dict = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) )
if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD:
UpperCamelCase :Any = (1 - t) * va + t * va
else:
UpperCamelCase :Union[str, Any] = np.arccos(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = theta_a * t
UpperCamelCase :str = np.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = np.sin(theta_a - theta_t ) / sin_theta_a
UpperCamelCase :List[Any] = sin_theta_t / sin_theta_a
UpperCamelCase :Union[str, Any] = sa * va + sa * va
if inputs_are_torch:
UpperCamelCase :Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return va
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
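    # Squared spherical (geodesic) distance between L2-normalized embeddings: 2 * arcsin(||x - y|| / 2) ** 2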
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
UpperCamelCase :int = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ):
for param in model.parameters():
UpperCamelCase :Any = value
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> str:
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , clip_model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , coca_model=SCREAMING_SNAKE_CASE_ , coca_tokenizer=SCREAMING_SNAKE_CASE_ , coca_transform=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Union[str, Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , SCREAMING_SNAKE_CASE_ )
else feature_extractor.size['''shortest_edge''']
)
UpperCamelCase :Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , SCREAMING_SNAKE_CASE_ )
set_requires_grad(self.clip_model , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase :Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.vae , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.unet , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
# get the original timestep using init_timestep
UpperCamelCase :Union[str, Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase :Optional[Any] = self.scheduler.timesteps[t_start:]
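        # e.g. num_inference_steps=50, strength=0.6 -> init_timestep=30, t_start=20, so only the final 30 denoising steps are run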
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
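        # Image-to-image initialization: encode the image with the VAE, scale the latents, duplicate per prompt, and add scheduler noise at the given timestep.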
if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}''' )
UpperCamelCase :Tuple = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ )
]
UpperCamelCase :List[str] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
else:
UpperCamelCase :Any = self.vae.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[str] = 0.1_8215 * init_latents
UpperCamelCase :Optional[Any] = init_latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase :List[Any] = randn_tensor(init_latents.shape , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
UpperCamelCase :Optional[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = init_latents
return latents
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
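        # Caption the image with the CoCa model; used to auto-generate a prompt when none is supplied.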
UpperCamelCase :List[str] = self.coca_transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCamelCase :Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCamelCase :List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
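        # Embed the image with CLIP and L2-normalize, repeating once per generation per prompt.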
UpperCamelCase :str = self.feature_extractor.preprocess(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCamelCase :int = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = image_embeddings_clip.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
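        # CLIP guidance: estimate x_0 from the current noise prediction, decode it with the VAE,
        # score it against the target CLIP embedding, and shift the noise prediction along the gradient of that loss.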
UpperCamelCase :List[str] = latents.detach().requires_grad_()
UpperCamelCase :List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[Any] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCamelCase :List[str] = self.scheduler.alphas_cumprod[timestep]
UpperCamelCase :Optional[int] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCamelCase :List[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCamelCase :int = torch.sqrt(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :str = self.scheduler.sigmas[index]
UpperCamelCase :Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :int = 1 / 0.1_8215 * sample
UpperCamelCase :List[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :List[str] = transforms.Resize(self.feature_extractor_size )(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = self.normalize(SCREAMING_SNAKE_CASE_ ).to(latents.dtype )
UpperCamelCase :List[Any] = self.clip_model.get_image_features(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = spherical_dist_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() * clip_guidance_scale
UpperCamelCase :Union[str, Any] = -torch.autograd.grad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
if isinstance(self.scheduler , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = latents.detach() + grads * (sigma**2)
UpperCamelCase :Optional[Any] = noise_pred_original
else:
UpperCamelCase :List[str] = noise_pred_original - torch.sqrt(SCREAMING_SNAKE_CASE_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.6 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0.8 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(SCREAMING_SNAKE_CASE_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Generator ) and batch_size > 1:
UpperCamelCase :Optional[int] = [generator] + [None] * (batch_size - 1)
UpperCamelCase :Tuple = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCamelCase :Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
UpperCamelCase :Dict = ''', '''.join(SCREAMING_SNAKE_CASE_ )
        # generate prompts with the CoCa model when a prompt is None
if content_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :Any = self.get_image_description(SCREAMING_SNAKE_CASE_ )
if style_prompt is None:
if len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
UpperCamelCase :str = self.get_image_description(SCREAMING_SNAKE_CASE_ )
# get prompt text embeddings for content and style
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
UpperCamelCase :Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCamelCase :Dict = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase :Union[str, Any] = text_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# set timesteps
UpperCamelCase :str = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCamelCase :List[str] = {}
if accepts_offset:
UpperCamelCase :Tuple = 1
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCamelCase , UpperCamelCase :Tuple = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
UpperCamelCase :Any = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
# Preprocess image
UpperCamelCase :Union[str, Any] = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = preprocess(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = self.prepare_latents(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text_embeddings.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = slerp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if clip_guidance_scale > 0:
UpperCamelCase :Dict = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.get_clip_image_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = slerp(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase :Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase :Any = content_text_input.input_ids.shape[-1]
UpperCamelCase :Any = self.tokenizer([''''''] , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCamelCase :Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCamelCase :Optional[int] = uncond_embeddings.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase :str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase :int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCamelCase :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device='''cpu''' , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase :int = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase :str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase :Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase :Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase :Dict = {}
if accepts_eta:
UpperCamelCase :int = eta
# check if the scheduler accepts generator
UpperCamelCase :Optional[int] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCamelCase :List[str] = generator
with self.progress_bar(total=SCREAMING_SNAKE_CASE_ ):
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase :Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase :List[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase :Any = noise_pred.chunk(2 )
UpperCamelCase :Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCamelCase :int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCamelCase , UpperCamelCase :str = self.cond_fn(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase :List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCamelCase :List[Any] = 1 / 0.1_8215 * latents
UpperCamelCase :Optional[Any] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase :List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 658 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="None" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> List[str]:
UpperCamelCase :List[str] = parent
UpperCamelCase :List[Any] = batch_size
UpperCamelCase :List[Any] = seq_length
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :Dict = use_input_mask
UpperCamelCase :List[Any] = use_token_type_ids
UpperCamelCase :Dict = use_labels
UpperCamelCase :int = vocab_size
UpperCamelCase :int = hidden_size
UpperCamelCase :List[str] = num_hidden_layers
UpperCamelCase :Dict = num_attention_heads
UpperCamelCase :Optional[int] = intermediate_size
UpperCamelCase :Union[str, Any] = hidden_act
UpperCamelCase :Dict = hidden_dropout_prob
UpperCamelCase :List[Any] = attention_probs_dropout_prob
UpperCamelCase :str = max_position_embeddings
UpperCamelCase :Dict = type_vocab_size
UpperCamelCase :str = type_sequence_label_size
UpperCamelCase :Optional[Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :Optional[Any] = num_choices
UpperCamelCase :str = relative_attention
UpperCamelCase :Tuple = position_biased_input
UpperCamelCase :Dict = pos_att_type
UpperCamelCase :Optional[int] = scope
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase :List[str] = None
if self.use_input_mask:
UpperCamelCase :Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase :Dict = None
if self.use_token_type_ids:
UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase :Any = None
UpperCamelCase :List[str] = None
UpperCamelCase :Union[str, Any] = None
if self.use_labels:
UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase :Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> int:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Tuple = self.get_config()
UpperCamelCase :List[Any] = 300
return config
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :List[str] = DebertaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase :str = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase :Dict = DebertaForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :List[Any] = self.num_labels
UpperCamelCase :Optional[int] = DebertaForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase :int = self.num_labels
UpperCamelCase :Tuple = DebertaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :Tuple = DebertaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :List[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Dict = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = config_and_inputs
UpperCamelCase :List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] =(
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] =True
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : List[str] =False
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Optional[int] = DebertaModelTester(self )
UpperCamelCase :List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :Optional[Any] = DebertaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@slow
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase :Union[str, Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase :Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
UpperCamelCase :Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 658 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :list[list[int]] = []
UpperCamelCase :list[int] = []
UpperCamelCase :List[str] = 0
UpperCamelCase :Any = sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , ):
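    # Backtracking with pruning: abandon a branch as soon as the running sum of
    # `path` exceeds `max_sum`, or when even adding every remaining number
    # (tracked by `remaining_nums_sum`) could no longer reach it.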
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
__snake_case = [3, 34, 4, 12, 5, 2]
__snake_case = 9
__snake_case = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 658 | 1 |
import cva
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
if k in (0.04, 0.06):
UpperCamelCase :Tuple = k
UpperCamelCase :Optional[Any] = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ) -> str:
return str(self.k )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> tuple[cva.Mat, list[list[int]]]:
UpperCamelCase :List[str] = cva.imread(SCREAMING_SNAKE_CASE_ , 0 )
UpperCamelCase , UpperCamelCase :Dict = img.shape
UpperCamelCase :list[list[int]] = []
UpperCamelCase :int = img.copy()
UpperCamelCase :str = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_GRAY2RGB )
UpperCamelCase , UpperCamelCase :Dict = np.gradient(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = dx**2
UpperCamelCase :Tuple = dy**2
UpperCamelCase :int = dx * dy
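        # ixx, iyy and ixy are the entries of the Harris structure tensor; summed
        # over a window they form M, and the response below is
        # R = det(M) - k * trace(M) ** 2.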
UpperCamelCase :Optional[int] = 0.04
UpperCamelCase :Any = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE_ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE_ , w - offset ):
UpperCamelCase :Any = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase :Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase :Dict = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase :str = (wxx * wyy) - (wxy**2)
UpperCamelCase :str = wxx + wyy
UpperCamelCase :str = det - k * (trace**2)
                # Corner-response threshold; raise it to keep only stronger corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
__snake_case = HarrisCorner(0.0_4, 3)
__snake_case , __snake_case = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
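    # A number is bouncy when its digits are neither monotonically increasing nor
    # decreasing, i.e. the digit string differs from both its sorted and its
    # reverse-sorted form.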
UpperCamelCase :int = str(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = ''''''.join(sorted(SCREAMING_SNAKE_CASE__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _A ( SCREAMING_SNAKE_CASE__ : float = 99 ):
if not 0 < percent < 100:
raise ValueError('''solution() only accepts values from 0 to 100''' )
UpperCamelCase :Tuple = 0
UpperCamelCase :str = 1
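    # Count upward until bouncy numbers make up at least `percent` of all
    # integers checked so far (Project Euler problem 112 uses percent == 99).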
while True:
if check_bouncy(SCREAMING_SNAKE_CASE__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""YolosFeatureExtractor"""]
__snake_case = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :Union[str, Any] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
UpperCamelCase :str = hex_num[0] == '''-'''
if is_negative:
UpperCamelCase :Union[str, Any] = hex_num[1:]
try:
UpperCamelCase :Optional[Any] = int(SCREAMING_SNAKE_CASE__ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
UpperCamelCase :Dict = ''''''
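    # Peel off the least significant bit on each iteration and prepend it,
    # building the binary digits from right to left.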
while int_num > 0:
UpperCamelCase :Tuple = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :Union[str, Any] = parent
UpperCamelCase :Tuple = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Any = patch_size
UpperCamelCase :List[str] = num_channels
UpperCamelCase :int = is_training
UpperCamelCase :str = use_labels
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :List[Any] = backbone_out_indices
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Tuple = intermediate_size
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :List[str] = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :int = backbone_featmap_shape
UpperCamelCase :Any = scope
UpperCamelCase :int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Dict = (image_size // patch_size) ** 2
UpperCamelCase :List[str] = num_patches + 1
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :List[str] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :Optional[Any] = self.num_labels
UpperCamelCase :Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :Optional[int] = self.num_labels
UpperCamelCase :int = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Tuple =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Tuple =False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Union[str, Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
UpperCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Any = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = False
UpperCamelCase :List[Any] = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Any:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :List[str] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = prepare_img()
UpperCamelCase :List[str] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :int = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase , UpperCamelCase :List[Any] = position
UpperCamelCase :Any = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
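    # The eight L-shaped knight moves relative to (y, x); only those that stay
    # on the n x n board are kept below.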
UpperCamelCase :Dict = []
for position in positions:
UpperCamelCase , UpperCamelCase :str = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE__ )
return permissible_positions
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
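    # Classic backtracking: write the move number into each reachable empty
    # square, recurse, and reset the square to 0 when the branch dead-ends.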
if is_complete(SCREAMING_SNAKE_CASE__ ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase , UpperCamelCase :Optional[int] = position
if board[y][x] == 0:
UpperCamelCase :Any = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , curr + 1 ):
return True
UpperCamelCase :Union[str, Any] = 0
return False
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[Any] = [[0 for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )]
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Tuple = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , (i, j) , 1 ):
return board
UpperCamelCase :str = 0
    UpperCamelCase :List[Any] = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Union[str, Any] = tempfile.mkdtemp()
UpperCamelCase :List[str] = 5
# Realm tok
UpperCamelCase :List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase :Dict = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase :Any = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Tuple = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCAmelCase ( self ) -> str:
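        # A toy corpus of byte-string blocks standing in for REALM's
        # block-records retrieval index.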
UpperCamelCase :Optional[Any] = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=SCREAMING_SNAKE_CASE_ , )
return block_records
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Optional[int] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[Any] = self.get_config()
UpperCamelCase :str = self.get_dummy_retriever()
UpperCamelCase :int = retriever.tokenizer
UpperCamelCase :Optional[Any] = np.array([0, 3] , dtype='''long''' )
UpperCamelCase :Optional[Any] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Tuple = tokenizer(
['''the fourth'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Optional[Any] = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = self.get_config()
UpperCamelCase :Union[str, Any] = self.get_dummy_retriever()
UpperCamelCase :Dict = retriever.tokenizer
UpperCamelCase :str = np.array([0, 3, 5] , dtype='''long''' )
UpperCamelCase :List[str] = tokenizer(['''Test question'''] ).input_ids
UpperCamelCase :Optional[Any] = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).input_ids
UpperCamelCase :Any = config.reader_seq_len
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , answer_ids=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , SCREAMING_SNAKE_CASE_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :str = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
UpperCamelCase :List[str] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
UpperCamelCase :Tuple = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCamelCase :List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 658 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase :int = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = AutoConfig.from_pretrained('''gpt2''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[str] = GenerationConfig()
UpperCamelCase :List[str] = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
UpperCamelCase :Dict = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = generation_config.update(**SCREAMING_SNAKE_CASE_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = GenerationConfig()
UpperCamelCase :Tuple = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
UpperCamelCase :Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ )
assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(default_config.num_beams , 1 )
UpperCamelCase :Tuple = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> Optional[Any]:
UpperCamelCase :List[str] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCAmelCase ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
UpperCamelCase :List[Any] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
UpperCamelCase :Any = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
UpperCamelCase :Tuple = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
| 658 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# Initialise PyTorch model
UpperCamelCase :Optional[Any] = FunnelConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase :Optional[int] = FunnelBaseModel(SCREAMING_SNAKE_CASE__ ) if base_model else FunnelModel(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 658 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
if length <= 0 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError('''Length must be a positive integer.''' )
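    # The n-th hexagonal number is n * (2n - 1).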
return [n * (2 * n - 1) for n in range(SCREAMING_SNAKE_CASE__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ):
try:
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as flax_state_f:
UpperCamelCase :Optional[int] = from_bytes(SCREAMING_SNAKE_CASE__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(SCREAMING_SNAKE_CASE__ ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
UpperCamelCase :Tuple = flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE__ : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE__ ) ).values()
if any(SCREAMING_SNAKE_CASE__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
UpperCamelCase :Tuple = jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = ''''''
UpperCamelCase :str = flatten_dict(SCREAMING_SNAKE_CASE__ , sep='''.''' )
UpperCamelCase :Optional[Any] = pt_model.state_dict()
# keep track of unexpected & missing keys
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :int = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase :Union[str, Any] = flax_key_tuple.split('''.''' )
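        # Flax stores conv kernels in HWIO layout and dense kernels transposed
        # relative to PyTorch, under the names "kernel"/"scale" rather than
        # "weight"; the branches below rename the keys and fix the layout.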
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
UpperCamelCase :Optional[Any] = flax_key_tuple_array[:-1] + ['''weight''']
UpperCamelCase :Union[str, Any] = jnp.transpose(SCREAMING_SNAKE_CASE__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
UpperCamelCase :Union[str, Any] = flax_key_tuple_array[:-1] + ['''weight''']
UpperCamelCase :List[Any] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
UpperCamelCase :Union[str, Any] = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :str = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
UpperCamelCase :Dict = '''.'''.join(SCREAMING_SNAKE_CASE__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCamelCase :Dict = np.asarray(SCREAMING_SNAKE_CASE__ ) if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) else flax_tensor
UpperCamelCase :Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE__ )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# re-transform missing_keys to list
UpperCamelCase :Tuple = list(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
return pt_model
| 658 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__snake_case = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] ='facebook/nllb-200-distilled-600M'
UpperCamelCase_ : Optional[Any] =(
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
UpperCamelCase_ : Dict ='translator'
UpperCamelCase_ : Any =AutoTokenizer
UpperCamelCase_ : Optional[Any] =AutoModelForSeqaSeqLM
UpperCamelCase_ : List[Any] =LANGUAGE_CODES
UpperCamelCase_ : int =['text', 'text', 'text']
UpperCamelCase_ : Union[str, Any] =['text']
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
UpperCamelCase :Optional[int] = self.lang_to_code[src_lang]
UpperCamelCase :Union[str, Any] = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return self.model.generate(**SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
return self.post_processor.decode(SCREAMING_SNAKE_CASE_[0].tolist() , skip_special_tokens=True )
| 658 | 1 |
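# --- Added usage sketch (not part of the original file) ---
# The tool above resolves plain-English language names through LANGUAGE_CODES,
# builds NLLB translation inputs, generates, and decodes. A minimal way to
# exercise it is through the transformers agents API; `load_tool` and the
# "translation" task name are assumptions based on that API, and the call
# downloads the facebook/nllb-200-distilled-600M checkpoint named in the class.
from transformers import load_tool

translator = load_tool("translation")
print(translator("Hola, ¿cómo estás?", src_lang="Spanish", tgt_lang="Romanian"))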
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , ) -> str:
UpperCamelCase :int = parent
UpperCamelCase :List[Any] = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :str = patch_size
UpperCamelCase :Any = num_channels
UpperCamelCase :str = is_training
UpperCamelCase :List[str] = use_labels
UpperCamelCase :Tuple = hidden_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :Optional[int] = num_attention_heads
UpperCamelCase :Optional[Any] = intermediate_size
UpperCamelCase :Dict = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :Optional[int] = attention_probs_dropout_prob
UpperCamelCase :List[Any] = type_sequence_label_size
UpperCamelCase :List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :List[Any] = (image_size // patch_size) ** 2
UpperCamelCase :Any = num_patches + 1
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :str = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :Dict = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :List[Any] = (self.image_size, self.image_size)
UpperCamelCase :Dict = (self.patch_size, self.patch_size)
UpperCamelCase :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase :Optional[Any] = self.type_sequence_label_size
UpperCamelCase :Union[str, Any] = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase :Union[str, Any] = 1
UpperCamelCase :int = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :int = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase :List[Any] = config_and_inputs
UpperCamelCase :str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] =(FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase ( self ) -> None:
UpperCamelCase :Optional[Any] = FlaxViTModelTester(self )
UpperCamelCase :Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase , UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :str = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase :Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase :Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest('''JIT Enabled''' ):
UpperCamelCase :str = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCamelCase :Dict = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase ( self ) -> int:
for model_class_name in self.all_model_classes:
UpperCamelCase :Any = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
UpperCamelCase :Tuple = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
| 658 |
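# --- Added standalone sketch (not part of the original file) ---
# The slow test above loads pretrained Flax ViT checkpoints and runs them on a
# dummy image; the same check outside the unittest harness looks like this.
# The checkpoint name is the one used in the test, and the input follows the
# (batch, channels, height, width) layout the test feeds in.
import numpy as np
from transformers import FlaxViTModel

model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
outputs = model(np.ones((1, 3, 224, 224)))
print(outputs.last_hidden_state.shape)  # (1, 197, 768): 14*14 patches + [CLS]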
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
for i in range(left , right + 1 ):
if array[i] == target:
return i
return -1
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Tuple = 0
UpperCamelCase :Dict = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = left + (right - left) // 3
UpperCamelCase :str = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
UpperCamelCase :int = one_third - 1
elif array[two_third] < target:
UpperCamelCase :Any = two_third + 1
else:
UpperCamelCase :Any = one_third + 1
UpperCamelCase :int = two_third - 1
else:
return -1
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
if left <= right:
if right - left < precision:
return lin_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = left + (right - left) // 3
UpperCamelCase :Tuple = right - (right - left) // 3
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(SCREAMING_SNAKE_CASE__ , one_third - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = input("""Enter numbers separated by comma:\n""").strip()
__snake_case = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__snake_case = int(input("""Enter the number to be found in the list:\n""").strip())
__snake_case = ite_ternary_search(collection, target)
__snake_case = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print("""Not found""")
| 658 | 1 |
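# --- Added sanity check (not part of the original file) ---
# Quick trace of the corrected third-point formulas above: on indices 0..99
# the first probes land at 33 and 66, so each step keeps roughly a third of
# the range until it is shorter than `precision` and falls back to the linear
# scan. Function names follow the references in the __main__ block above.
collection = list(range(100))
assert ite_ternary_search(collection, 55) == 55
assert rec_ternary_search(0, len(collection) - 1, collection, 55) == 55
assert ite_ternary_search(collection, 500) == -1  # absent values report -1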
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _A ( ):
UpperCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase :Dict = parser.parse_args()
return args.f
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]="eval" ):
UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
raise ValueError(F'''can\'t find {path}''' )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_glue.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_clm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Dict = self.get_auto_remove_tmp_dir()
UpperCamelCase :Any = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_summarization_flax.main()
UpperCamelCase :str = get_results(SCREAMING_SNAKE_CASE_ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :List[str] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_mlm_flax.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :int = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_ta_mlm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCamelCase :Tuple = 7 if get_gpu_count() > 1 else 2
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[int] = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_ner.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Dict = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_qa.main()
UpperCamelCase :int = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 658 |
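# --- Added pattern sketch (not part of the original file) ---
# Every test above reuses one technique: compose the example script's CLI as a
# string, split it into an argv list, and patch sys.argv so the script's
# main() picks the arguments up through HfArgumentParser. Stripped down
# (the script name and flags here are placeholders):
import sys
from unittest.mock import patch

testargs = "run_example.py --output_dir /tmp/out --do_train".split()
with patch.object(sys, "argv", testargs):
    print(sys.argv)  # code under test now parses these instead of the real CLI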
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if n == 0:
return 0
UpperCamelCase :Union[str, Any] = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :str = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ ) )
return max_revue
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase :Dict = float('''-inf''' )
for i in range(1 , n + 1 ):
UpperCamelCase :Union[str, Any] = max(
SCREAMING_SNAKE_CASE__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
UpperCamelCase :str = max_revenue
return max_rev[n]
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
_enforce_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
UpperCamelCase :List[str] = [float('''-inf''' ) for _ in range(n + 1 )]
UpperCamelCase :Dict = 0
for i in range(1 , n + 1 ):
UpperCamelCase :Optional[Any] = max_rev[i]
for j in range(1 , i + 1 ):
UpperCamelCase :Optional[Any] = max(SCREAMING_SNAKE_CASE__ , prices[j - 1] + max_rev[i - j] )
UpperCamelCase :Tuple = max_revenue_i
return max_rev[n]
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list ):
if n < 0:
UpperCamelCase :Any = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if n > len(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Union[str, Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE__ )}'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
def _A ( ):
UpperCamelCase :Dict = [6, 10, 12, 15, 20, 23]
UpperCamelCase :List[str] = len(SCREAMING_SNAKE_CASE__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase :str = 36
UpperCamelCase :int = top_down_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = bottom_up_cut_rod(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 658 | 1 |
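# --- Added worked example (not part of the original file) ---
# The classic CLRS price table run through the three variants above, using the
# function names referenced in main(). All three agree; the naive recursion is
# exponential, while both dynamic-programming versions run in O(n^2).
prices = [1, 5, 8, 9, 10, 17, 17, 20]
# best for a rod of length 8: pieces 2 + 6 -> 5 + 17 = 22
assert naive_cut_rod_recursive(8, prices) == 22
assert top_down_cut_rod(8, prices) == 22
assert bottom_up_cut_rod(8, prices) == 22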
__snake_case = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 658 |
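# --- Added parsing sketch (not part of the original file) ---
# Each value above is a full pip requirement string. A small illustration of
# splitting such pins into (package, specifier) pairs, e.g. when regenerating
# setup.py extras; the two-entry dict is a stand-in for the table above.
import re

pins = {"torch": "torch>=1.4", "jax": "jax>=0.2.8,!=0.3.2"}
parsed = {name: re.match(r"([A-Za-z0-9_.-]+)(.*)", spec).groups() for name, spec in pins.items()}
print(parsed["jax"])  # ('jax', '>=0.2.8,!=0.3.2')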
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class UpperCAmelCase_ ( lowercase, lowercase ):
"""simple docstring"""
UpperCamelCase_ : int ='focalnet'
def __init__( self , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=96 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=[192, 384, 768, 768] , SCREAMING_SNAKE_CASE_=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[3, 3, 3, 3] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=4.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1e-4 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = image_size
UpperCamelCase :Dict = patch_size
UpperCamelCase :Tuple = num_channels
UpperCamelCase :int = embed_dim
UpperCamelCase :Optional[Any] = use_conv_embed
UpperCamelCase :str = hidden_sizes
UpperCamelCase :str = depths
UpperCamelCase :Optional[int] = focal_levels
UpperCamelCase :Tuple = focal_windows
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :Optional[int] = mlp_ratio
UpperCamelCase :Optional[Any] = hidden_dropout_prob
UpperCamelCase :int = drop_path_rate
UpperCamelCase :Dict = use_layerscale
UpperCamelCase :List[str] = layerscale_value
UpperCamelCase :Tuple = use_post_layernorm
UpperCamelCase :int = use_post_layernorm_in_modulation
UpperCamelCase :str = normalize_modulator
UpperCamelCase :Any = initializer_range
UpperCamelCase :Optional[Any] = layer_norm_eps
UpperCamelCase :Dict = encoder_stride
UpperCamelCase :int = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
UpperCamelCase , UpperCamelCase :int = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
| 658 | 1 |
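# --- Added usage sketch (not part of the original file) ---
# Instantiating the config above and inspecting the derived stage names that
# drive backbone feature selection. Assumes a transformers version that ships
# FocalNetConfig.
from transformers import FocalNetConfig

config = FocalNetConfig(depths=[2, 2, 6, 2])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']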
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.2 , SCREAMING_SNAKE_CASE_=0.2 ) -> Tuple:
UpperCamelCase :Tuple = bp_numa
UpperCamelCase :Any = bp_numa
UpperCamelCase :str = bp_numa
UpperCamelCase :Dict = conva_get[:2]
UpperCamelCase :Dict = conva_get[2]
UpperCamelCase :int = size_pa
UpperCamelCase :Union[str, Any] = rate_w
UpperCamelCase :int = rate_t
UpperCamelCase :List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCamelCase :Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCamelCase :Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCamelCase :str = -2 * np.random.rand(self.conva[1] ) + 1
UpperCamelCase :Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
UpperCamelCase :Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
# save model dict with pickle
UpperCamelCase :Union[str, Any] = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
pickle.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE_ ) -> int:
# read saved model
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
UpperCamelCase :Optional[int] = pickle.load(SCREAMING_SNAKE_CASE_ ) # noqa: S301
UpperCamelCase :List[Any] = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
UpperCamelCase :Tuple = model_dic.get('''size_pooling1''' )
UpperCamelCase :Tuple = model_dic.get('''num_bp1''' )
UpperCamelCase :Optional[Any] = model_dic.get('''num_bp2''' )
UpperCamelCase :List[str] = model_dic.get('''num_bp3''' )
UpperCamelCase :Tuple = model_dic.get('''rate_weight''' )
UpperCamelCase :str = model_dic.get('''rate_thre''' )
# create model instance
UpperCamelCase :Tuple = CNN(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# modify model parameter
UpperCamelCase :Tuple = model_dic.get('''w_conv1''' )
UpperCamelCase :Any = model_dic.get('''wkj''' )
UpperCamelCase :List[str] = model_dic.get('''vji''' )
UpperCamelCase :List[Any] = model_dic.get('''thre_conv1''' )
UpperCamelCase :Tuple = model_dic.get('''thre_bp2''' )
UpperCamelCase :Optional[int] = model_dic.get('''thre_bp3''' )
return conv_ins
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
return 1 / (1 + np.exp(-1 * x ))
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return round(SCREAMING_SNAKE_CASE_ , 3 )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
# convolution process
UpperCamelCase :str = convs[0]
UpperCamelCase :List[Any] = convs[1]
UpperCamelCase :Union[str, Any] = np.shape(SCREAMING_SNAKE_CASE_ )[0]
# get the data slice of original image data, data_focus
UpperCamelCase :List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE_ ):
for j_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(SCREAMING_SNAKE_CASE_ )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCamelCase :Dict = []
UpperCamelCase :Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Optional[Any] = []
for i_focus in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase :int = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :Optional[Any] = np.asmatrix(SCREAMING_SNAKE_CASE_ ).reshape(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
data_featuremap.append(SCREAMING_SNAKE_CASE_ )
# expanding the data slice to one dimension
UpperCamelCase :Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :int = np.asarray(SCREAMING_SNAKE_CASE_ )
return focus_list, data_featuremap
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="average_pool" ) -> Tuple:
# pooling process
UpperCamelCase :List[Any] = len(featuremaps[0] )
UpperCamelCase :str = int(size_map / size_pooling )
UpperCamelCase :Optional[Any] = []
for i_map in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase :int = featuremaps[i_map]
UpperCamelCase :List[str] = []
for i_focus in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for j_focus in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(SCREAMING_SNAKE_CASE_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :List[str] = np.asmatrix(SCREAMING_SNAKE_CASE_ ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
featuremap_pooled.append(SCREAMING_SNAKE_CASE_ )
return featuremap_pooled
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
# expanding three dimension data to one dimension list
UpperCamelCase :Dict = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase :List[Any] = np.shape(data[i] )
UpperCamelCase :Optional[int] = data[i].reshape(1 , shapes[0] * shapes[1] )
UpperCamelCase :Union[str, Any] = data_listed.getA().tolist()[0]
data_expanded.extend(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = np.asarray(SCREAMING_SNAKE_CASE_ )
return data_expanded
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
# expanding matrix to one dimension list
UpperCamelCase :Any = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = np.shape(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase :Tuple = []
UpperCamelCase :List[Any] = 0
for i_map in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Optional[int] = np.ones((size_map, size_map) )
for i in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for j in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :List[str] = pd_pool[
i_pool
]
UpperCamelCase :Tuple = i_pool + 1
UpperCamelCase :str = np.multiply(
SCREAMING_SNAKE_CASE_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(SCREAMING_SNAKE_CASE_ )
return pd_all
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=bool ) -> List[Any]:
# model traning
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(SCREAMING_SNAKE_CASE_ )) )
print((''' - - Shape: Teach_Data ''', np.shape(SCREAMING_SNAKE_CASE_ )) )
UpperCamelCase :int = 0
UpperCamelCase :Any = []
UpperCamelCase :int = 1_0000
while rp < n_repeat and mse >= error_accuracy:
UpperCamelCase :Tuple = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(SCREAMING_SNAKE_CASE_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCamelCase :Optional[Any] = np.asmatrix(datas_train[p] )
UpperCamelCase :Optional[int] = np.asarray(datas_teach[p] )
UpperCamelCase , UpperCamelCase :Tuple = self.convolute(
SCREAMING_SNAKE_CASE_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase :Tuple = self.pooling(SCREAMING_SNAKE_CASE_ , self.size_poolinga )
UpperCamelCase :str = np.shape(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = self._expand(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = data_bp_input
UpperCamelCase :Optional[int] = np.dot(SCREAMING_SNAKE_CASE_ , self.vji.T ) - self.thre_bpa
UpperCamelCase :Any = self.sig(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = np.dot(SCREAMING_SNAKE_CASE_ , self.wkj.T ) - self.thre_bpa
UpperCamelCase :List[str] = self.sig(SCREAMING_SNAKE_CASE_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCamelCase :int = np.multiply(
(data_teach - bp_outa) , np.multiply(SCREAMING_SNAKE_CASE_ , (1 - bp_outa) ) )
UpperCamelCase :Optional[Any] = np.multiply(
np.dot(SCREAMING_SNAKE_CASE_ , self.wkj ) , np.multiply(SCREAMING_SNAKE_CASE_ , (1 - bp_outa) ) )
UpperCamelCase :Dict = np.dot(SCREAMING_SNAKE_CASE_ , self.vji )
UpperCamelCase :str = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCamelCase :List[Any] = pd_conva_pooled.T.getA().tolist()
UpperCamelCase :List[str] = self._calculate_gradient_from_pool(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCamelCase :Union[str, Any] = self._expand_mat(pd_conva_all[k_conv] )
UpperCamelCase :Union[str, Any] = self.rate_weight * np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCamelCase :Optional[int] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCamelCase :Union[str, Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCamelCase :List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCamelCase :str = self.thre_bpa - pd_k_all * self.rate_thre
UpperCamelCase :Tuple = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCamelCase :Dict = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCamelCase :Union[str, Any] = rp + 1
UpperCamelCase :int = error_count / patterns
all_mse.append(SCREAMING_SNAKE_CASE_ )
def draw_error():
UpperCamelCase :List[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(SCREAMING_SNAKE_CASE_ , '''+-''' )
plt.plot(SCREAMING_SNAKE_CASE_ , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(SCREAMING_SNAKE_CASE_ , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
# model predict
UpperCamelCase :List[Any] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(SCREAMING_SNAKE_CASE_ )) )
for p in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase :str = np.asmatrix(datas_test[p] )
UpperCamelCase , UpperCamelCase :Tuple = self.convolute(
SCREAMING_SNAKE_CASE_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase :int = self.pooling(SCREAMING_SNAKE_CASE_ , self.size_poolinga )
UpperCamelCase :Union[str, Any] = self._expand(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = data_bp_input
UpperCamelCase :Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCamelCase :Any = self.sig(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCamelCase :List[Any] = self.sig(SCREAMING_SNAKE_CASE_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCamelCase :str = [list(map(self.do_round , SCREAMING_SNAKE_CASE_ ) ) for each in produce_out]
return np.asarray(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
# return the data of image after convoluting process so we can check it out
UpperCamelCase :Tuple = np.asmatrix(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase :int = self.convolute(
SCREAMING_SNAKE_CASE_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase :int = self.pooling(SCREAMING_SNAKE_CASE_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 658 |
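# --- Added smoke-test sketch (not part of the original file) ---
# Wiring for the class above under its upstream (TheAlgorithms) names, which
# the anonymized method names obscure: `CNN.train` / `CNN.predict`; keyword
# names are assumptions from that source. Sizes are chosen to be consistent:
# 10x10 inputs, 3x3 kernels, 2 feature maps, 2x2 pooling -> 2 * 4 * 4 = 32
# flattened features feeding the BP layers.
import numpy as np

cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=32, bp_num2=10, bp_num3=2)
train_x = [np.random.rand(10, 10) for _ in range(4)]
train_y = [[1, 0], [0, 1], [1, 0], [0, 1]]
cnn.train(4, train_x, train_y, n_repeat=5, error_accuracy=0.1, draw_e=False)
print(cnn.predict(train_x))  # 4 rounded two-way class scores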
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :Union[str, Any] = parent
UpperCamelCase :Tuple = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Any = patch_size
UpperCamelCase :List[str] = num_channels
UpperCamelCase :int = is_training
UpperCamelCase :str = use_labels
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :List[Any] = backbone_out_indices
UpperCamelCase :str = num_attention_heads
UpperCamelCase :Tuple = intermediate_size
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :List[str] = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :int = backbone_featmap_shape
UpperCamelCase :Any = scope
UpperCamelCase :int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Dict = (image_size // patch_size) ** 2
UpperCamelCase :List[str] = num_patches + 1
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :Tuple = None
if self.use_labels:
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :List[str] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase :Optional[Any] = self.num_labels
UpperCamelCase :Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :Optional[int] = self.num_labels
UpperCamelCase :int = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Tuple =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Tuple =False
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Union[str, Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :int = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
UpperCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Any = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = False
UpperCamelCase :List[Any] = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Union[str, Any] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Any:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Dict = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :List[str] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Tuple = prepare_img()
UpperCamelCase :List[str] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :int = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :int = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 658 | 1 |
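# --- Added post-processing sketch (not part of the original file) ---
# The integration test above checks raw predicted_depth values; to visualize
# them, the usual recipe (resize, min-max normalize, cast to uint8) looks like
# this. The interpolation settings follow the common DPT model-card example
# and are assumptions here, not part of the test.
import torch

def depth_to_uint8(predicted_depth: torch.Tensor, size=(384, 384)):
    # (1, H, W) -> (1, 1, H, W) for interpolate, back to (H, W) after
    depth = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=size, mode="bicubic", align_corners=False
    ).squeeze()
    depth = (depth - depth.min()) / (depth.max() - depth.min())
    return (depth * 255).to(torch.uint8).cpu().numpy()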
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def _A ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : int = 16000 ):
UpperCamelCase :Any = int(round(sample_rate * max_length ) )
if len(SCREAMING_SNAKE_CASE__ ) <= sample_length:
return wav
UpperCamelCase :Any = randint(0 , len(SCREAMING_SNAKE_CASE__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ : Optional[str] =field(default=lowercase, metadata={'help': 'Name of a dataset from the datasets package'} )
UpperCamelCase_ : Optional[str] =field(
default=lowercase, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCamelCase_ : Optional[str] =field(
default=lowercase, metadata={'help': 'A file containing the training audio paths and labels.'} )
UpperCamelCase_ : Optional[str] =field(
default=lowercase, metadata={'help': 'A file containing the validation audio paths and labels.'} )
UpperCamelCase_ : str =field(
default='train', metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
}, )
UpperCamelCase_ : str =field(
default='validation', metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
}, )
UpperCamelCase_ : str =field(
default='audio', metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''}, )
UpperCamelCase_ : str =field(
default='label', metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
UpperCamelCase_ : Optional[int] =field(
default=lowercase, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
UpperCamelCase_ : Optional[int] =field(
default=lowercase, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
UpperCamelCase_ : float =field(
default=20, metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'}, )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ : str =field(
default='facebook/wav2vec2-base', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}, )
UpperCamelCase_ : Optional[str] =field(
default=lowercase, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase_ : Optional[str] =field(
default=lowercase, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
UpperCamelCase_ : str =field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
UpperCamelCase_ : Optional[str] =field(
default=lowercase, metadata={'help': 'Name or path of preprocessor config.'} )
UpperCamelCase_ : bool =field(
default=lowercase, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
UpperCamelCase_ : bool =field(
default=lowercase, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
UpperCamelCase_ : bool =field(
default=lowercase, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
UpperCamelCase_ : Optional[bool] =field(
default=lowercase, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
UpperCamelCase_ : bool =field(
default=lowercase, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )
def UpperCAmelCase ( self ) -> List[str]:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder` '''
'''instead. Setting `freeze_feature_encoder==True`.''' , SCREAMING_SNAKE_CASE_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`. '''
'''Only make use of `--freeze_feature_encoder`.''' )
def _A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase :Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase :Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase :Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase :int = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase :Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase :Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase :Dict = DatasetDict()
UpperCamelCase :str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase :List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase :str = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase :Optional[Any] = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase :List[Any] = feature_extractor.model_input_names[0]
def train_transforms(SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :int = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase :List[Any] = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase :Any = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
UpperCamelCase :Optional[Any] = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(SCREAMING_SNAKE_CASE__ : List[Any] ):
UpperCamelCase :Optional[Any] = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
UpperCamelCase :Tuple = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase :str = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
UpperCamelCase :Optional[Any] = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase :Dict = raw_datasets['''train'''].features[data_args.label_column_name].names
UpperCamelCase , UpperCamelCase :Dict = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :List[Any] = str(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = label
# Load the accuracy metric from the datasets package
UpperCamelCase :str = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE__ : List[Any] ):
UpperCamelCase :Any = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=eval_pred.label_ids )
UpperCamelCase :Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE__ ) , labelaid=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase :str = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase :Optional[Any] = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase :int = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
# Initialize our trainer
UpperCamelCase :Optional[int] = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
UpperCamelCase :str = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase :Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase :List[str] = last_checkpoint
UpperCamelCase :Optional[int] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase :Union[str, Any] = trainer.evaluate()
trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
UpperCamelCase :Optional[int] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
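# The training transform above calls `random_subsample`, which this excerpt
# imports from elsewhere in the example. A minimal sketch of what such a
# helper could look like (signature inferred from the call site; the exact
# implementation is an assumption):
import numpy as np

def random_subsample(wav, max_length, sample_rate=16000):
    """Randomly crop `wav` to at most `max_length` seconds (assumed behavior)."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = np.random.randint(0, len(wav) - sample_length)
    return wav[random_offset : random_offset + sample_length]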
| 658 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Union[str, Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
UpperCamelCase :Any = 128
elif "12-12" in model_name:
UpperCamelCase :Union[str, Any] = 12
UpperCamelCase :Any = 12
elif "14-14" in model_name:
UpperCamelCase :Optional[int] = 14
UpperCamelCase :List[str] = 14
elif "16-16" in model_name:
UpperCamelCase :List[Any] = 16
UpperCamelCase :Optional[Any] = 16
else:
raise ValueError('''Model not supported''' )
UpperCamelCase :Tuple = '''huggingface/label-files'''
if "speech-commands" in model_name:
UpperCamelCase :Optional[Any] = 35
UpperCamelCase :List[Any] = '''speech-commands-v2-id2label.json'''
else:
UpperCamelCase :Optional[int] = 527
UpperCamelCase :List[Any] = '''audioset-id2label.json'''
UpperCamelCase :Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase :List[str] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
UpperCamelCase :List[Any] = idalabel
UpperCamelCase :List[Any] = {v: k for k, v in idalabel.items()}
return config
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if "module.v" in name:
UpperCamelCase :Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
UpperCamelCase :int = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
UpperCamelCase :Tuple = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
UpperCamelCase :Optional[int] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase :str = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
UpperCamelCase :Any = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCamelCase :Union[str, Any] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCamelCase :str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCamelCase :Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase :Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase :List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
UpperCamelCase :Union[str, Any] = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
UpperCamelCase :int = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
UpperCamelCase :Tuple = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
for key in orig_state_dict.copy().keys():
UpperCamelCase :Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
UpperCamelCase :Any = key.split('''.''' )
UpperCamelCase :str = int(key_split[3] )
UpperCamelCase :Union[str, Any] = config.hidden_size
if "weight" in key:
UpperCamelCase :List[str] = val[:dim, :]
UpperCamelCase :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase :Optional[Any] = val[-dim:, :]
else:
UpperCamelCase :Dict = val[:dim]
UpperCamelCase :Optional[int] = val[dim : dim * 2]
UpperCamelCase :List[Any] = val[-dim:]
else:
UpperCamelCase :Union[str, Any] = val
return orig_state_dict
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[str] = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=False ):
UpperCamelCase :Optional[Any] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
UpperCamelCase :Optional[int] = model_name_to_url[model_name]
UpperCamelCase :Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE__ )
# rename some keys
UpperCamelCase :Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load 🤗 model
UpperCamelCase :int = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
UpperCamelCase :Union[str, Any] = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
UpperCamelCase :List[str] = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
UpperCamelCase :Optional[Any] = 1024 if '''speech-commands''' not in model_name else 128
UpperCamelCase :int = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
if "speech-commands" in model_name:
UpperCamelCase :Dict = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
UpperCamelCase :List[Any] = dataset[0]['''audio''']['''array''']
else:
UpperCamelCase :List[Any] = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
UpperCamelCase , UpperCamelCase :Dict = torchaudio.load(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :List[str] = waveform.squeeze().numpy()
UpperCamelCase :Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=16000 , return_tensors='''pt''' )
# forward pass
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCamelCase :Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCamelCase :Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCamelCase :str = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCamelCase :List[str] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCamelCase :Dict = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCamelCase :List[str] = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCamelCase :Optional[int] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCamelCase :List[Any] = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
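# Illustration of the fused-QKV split performed in `convert_state_dict` above:
# the original checkpoint stores a single (3 * dim, dim) projection, which is
# sliced into separate query/key/value tensors. Toy dimensions below; the real
# model uses the config's hidden size.
import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)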
| 658 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""nielsr/canine-s""": 20_48,
}
# Unicode defines 1,114,112 total “codepoints”
__snake_case = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__snake_case = 0
__snake_case = 0XE0_00
__snake_case = 0XE0_01
__snake_case = 0XE0_02
__snake_case = 0XE0_03
__snake_case = 0XE0_04
# Maps special codepoints to human-readable names.
__snake_case = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
__snake_case = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , SCREAMING_SNAKE_CASE_=chr(CLS ) , SCREAMING_SNAKE_CASE_=chr(SEP ) , SCREAMING_SNAKE_CASE_=chr(SEP ) , SCREAMING_SNAKE_CASE_=chr(CLS ) , SCREAMING_SNAKE_CASE_=chr(PAD ) , SCREAMING_SNAKE_CASE_=chr(MASK ) , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2048 , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
UpperCamelCase :Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
UpperCamelCase :Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
UpperCamelCase :Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
UpperCamelCase :int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
UpperCamelCase :Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase :Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , model_max_length=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Creates a mapping for looking up the IDs of special symbols.
UpperCamelCase :Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
UpperCamelCase :int = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
UpperCamelCase :Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
UpperCamelCase :Tuple = UNICODE_VOCAB_SIZE
UpperCamelCase :Union[str, Any] = len(self._special_codepoints )
@property
def UpperCAmelCase ( self ) -> int:
return self._unicode_vocab_size
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
try:
return ord(SCREAMING_SNAKE_CASE_ )
except TypeError:
raise ValueError(F'''invalid token: \'{token}\'''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str:
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(SCREAMING_SNAKE_CASE_ )
except TypeError:
raise ValueError(F'''invalid id: {index}''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
return "".join(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase :Dict = [self.sep_token_id]
UpperCamelCase :Union[str, Any] = [self.cls_token_id]
UpperCamelCase :Optional[Any] = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Union[str, Any] = [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
if token_ids_a is not None:
result += ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return result
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase :int = [self.sep_token_id]
UpperCamelCase :List[str] = [self.cls_token_id]
UpperCamelCase :Optional[int] = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Optional[Any]:
return ()
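# Minimal illustration of the codepoint scheme implemented above: ordinary
# tokens round-trip through ord()/chr(), while special symbols map through
# SPECIAL_CODEPOINTS (values per the constants at the top of the file).
text = "hello"
ids = [ord(ch) for ch in text]                 # _convert_token_to_id
assert "".join(chr(i) for i in ids) == text    # _convert_id_to_token + join
assert SPECIAL_CODEPOINTS[0XE0_00] == "[CLS]"  # the CLS pseudo-character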
| 658 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
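# A rough sketch of the lazy-import pattern `_LazyModule` implements above
# (heavily simplified and an assumption about internals -- the real class also
# handles submodules, __dir__, pickling, and the module spec):
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        # Import the submodule only when one of its names is first accessed.
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)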
| 658 | 1 |
def binomial_coefficient( n : int , r : int ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

print(binomial_coefficient(n=10, r=5))
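# Sanity check for the helper above: C(10, 5) = 252 and C(n, 0) = 1.
assert binomial_coefficient(10, 5) == 252
assert binomial_coefficient(5, 0) == 1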
| 658 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCamelCase :Tuple = 0.0
for i, j in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
n_correct += 1.0 if math_equivalence.is_equiv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else 0.0
UpperCamelCase :int = n_correct / len(SCREAMING_SNAKE_CASE_ )
return {
"accuracy": accuracy,
}
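# `math_equivalence.is_equiv` (from the hendrycks/math repo) is what performs
# the canonicalization. A toy stand-in conveying the idea -- an illustrative
# assumption, not the real implementation:
import re

def toy_is_equiv(prediction, reference):
    def canon(s):
        s = s.strip().replace(" ", "")
        # e.g. rewrite \frac{1}{2} as 1/2 for this toy comparison
        return re.sub(r"\\frac\{([^}]*)\}\{([^}]*)\}", r"\1/\2", s)
    return canon(prediction) == canon(reference)

assert toy_is_equiv("1/2", "\\frac{1}{2}")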
| 658 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
UpperCamelCase :List[Any] = original_name.split('''.''' )[0]
UpperCamelCase :Any = key.split('''.''' )
UpperCamelCase :Dict = int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
UpperCamelCase :List[str] = int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
UpperCamelCase :str = orig_block_num - offset
UpperCamelCase :Any = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def _A ( SCREAMING_SNAKE_CASE__ : Dict ):
UpperCamelCase :List[str] = OrderedDict()
UpperCamelCase , UpperCamelCase :List[Any] = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
UpperCamelCase :int = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
UpperCamelCase :List[str] = key[: key.find('''proj''' )]
UpperCamelCase :List[Any] = key.replace(SCREAMING_SNAKE_CASE__ , F'''patch_embeddings.{total_embed_found}.''' )
UpperCamelCase :List[Any] = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
UpperCamelCase :str = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
UpperCamelCase :Optional[int] = replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
UpperCamelCase :List[str] = replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
UpperCamelCase :str = replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''norm1''' , '''before_norm''' )
if "norm2" in key:
UpperCamelCase :Dict = replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
UpperCamelCase :Optional[Any] = replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
UpperCamelCase :Optional[Any] = replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
UpperCamelCase :Dict = key.replace('''head''' , '''classifier''' )
UpperCamelCase :Any = value
return new_state_dict
def _A ( ):
UpperCamelCase :int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase :List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :Dict = PoolFormerConfig()
# set attributes based on model_name
UpperCamelCase :str = '''huggingface/label-files'''
UpperCamelCase :Optional[int] = model_name[-3:]
UpperCamelCase :Tuple = 1000
UpperCamelCase :List[str] = '''imagenet-1k-id2label.json'''
UpperCamelCase :int = (1, 1000)
# set config attributes
UpperCamelCase :List[str] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase :str = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
UpperCamelCase :List[Any] = idalabel
UpperCamelCase :List[Any] = {v: k for k, v in idalabel.items()}
if size == "s12":
UpperCamelCase :Union[str, Any] = [2, 2, 6, 2]
UpperCamelCase :List[Any] = [64, 128, 320, 512]
UpperCamelCase :Union[str, Any] = 4.0
UpperCamelCase :Optional[Any] = 0.9
elif size == "s24":
UpperCamelCase :Tuple = [4, 4, 12, 4]
UpperCamelCase :int = [64, 128, 320, 512]
UpperCamelCase :Any = 4.0
UpperCamelCase :List[Any] = 0.9
elif size == "s36":
UpperCamelCase :Union[str, Any] = [6, 6, 18, 6]
UpperCamelCase :Dict = [64, 128, 320, 512]
UpperCamelCase :Union[str, Any] = 4.0
UpperCamelCase :Dict = 1e-6
UpperCamelCase :List[str] = 0.9
elif size == "m36":
UpperCamelCase :str = [6, 6, 18, 6]
UpperCamelCase :str = [96, 192, 384, 768]
UpperCamelCase :int = 4.0
UpperCamelCase :Optional[Any] = 1e-6
UpperCamelCase :Optional[int] = 0.95
elif size == "m48":
UpperCamelCase :int = [8, 8, 24, 8]
UpperCamelCase :Optional[Any] = [96, 192, 384, 768]
UpperCamelCase :Optional[int] = 4.0
UpperCamelCase :Optional[Any] = 1e-6
UpperCamelCase :List[str] = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
UpperCamelCase :int = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
UpperCamelCase :Tuple = prepare_img()
UpperCamelCase :int = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
UpperCamelCase :Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('''cpu''' ) )
# rename keys
UpperCamelCase :Optional[int] = rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
UpperCamelCase :List[str] = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
UpperCamelCase :str = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
UpperCamelCase :Any = model(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :int = outputs.logits
# define expected logit slices for different models
if size == "s12":
UpperCamelCase :Union[str, Any] = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
UpperCamelCase :Optional[int] = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
UpperCamelCase :int = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
UpperCamelCase :str = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
UpperCamelCase :List[str] = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
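# Self-contained illustration of the offset-based renaming above (mirroring
# the replace-with-offset helper's behavior): the stage index stored in the
# flat checkpoint key is shifted down by the number of embedding layers seen.
def rename_with_offset(key: str, offset: int, original_name: str, new_name: str) -> str:
    to_find = original_name.split(".")[0]
    parts = key.split(".")
    orig_block_num = int(parts[parts.index(to_find) - 2])
    layer_num = int(parts[parts.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}",
        f"block.{new_block_num}.{layer_num}.{new_name}",
    )

assert (
    rename_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "poolformer.encoder.block.1.3.output.conv1.weight"
)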
| 658 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def _A ( ):
UpperCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase :Dict = parser.parse_args()
return args.f
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]="eval" ):
UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}_results.json''' )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
raise ValueError(F'''can\'t find {path}''' )
__snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_glue.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[Any] = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_clm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Dict = self.get_auto_remove_tmp_dir()
UpperCamelCase :Any = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_summarization_flax.main()
UpperCamelCase :str = get_results(SCREAMING_SNAKE_CASE_ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :List[str] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_mlm_flax.main()
UpperCamelCase :Dict = get_results(SCREAMING_SNAKE_CASE_ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase :int = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_ta_mlm_flax.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCAmelCase ( self ) -> Tuple:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCamelCase :Tuple = 7 if get_gpu_count() > 1 else 2
UpperCamelCase :int = self.get_auto_remove_tmp_dir()
UpperCamelCase :Optional[int] = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_flax_ner.main()
UpperCamelCase :Any = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase :Dict = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ):
run_qa.main()
UpperCamelCase :int = get_results(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
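# For reference, `get_results` above simply reads "{split}_results.json" from
# the run's output directory. A file shaped like the one below (values are
# illustrative, not real run output) is what the assertions consume:
import json, os, tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    with open(os.path.join(tmp_dir, "eval_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.78, "eval_f1": 0.45}, f)
    with open(os.path.join(tmp_dir, "eval_results.json"), "r") as f:
        assert json.load(f)["eval_accuracy"] == 0.78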
| 658 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] ='upernet'
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.4 , SCREAMING_SNAKE_CASE_=384 , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=255 , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase :Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Optional[int] = backbone_config.get('''model_type''' )
UpperCamelCase :str = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase :int = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = backbone_config
UpperCamelCase :Any = hidden_size
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :int = pool_scales
UpperCamelCase :List[Any] = use_auxiliary_head
UpperCamelCase :List[Any] = auxiliary_loss_weight
UpperCamelCase :Any = auxiliary_in_channels
UpperCamelCase :int = auxiliary_channels
UpperCamelCase :Optional[Any] = auxiliary_num_convs
UpperCamelCase :Optional[int] = auxiliary_concat_input
UpperCamelCase :Any = loss_ignore_index
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Tuple = copy.deepcopy(self.__dict__ )
UpperCamelCase :List[str] = self.backbone_config.to_dict()
UpperCamelCase :Any = self.__class__.model_type
return output
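# Minimal usage sketch (an illustration, assuming a standard `transformers`
# install): swap the default ResNet backbone for a Swin backbone.
from transformers import SwinConfig, UperNetConfig

backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config)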
| 658 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , ) -> float:
    xa = x_start
    fxa = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb )
        area += abs(fxa + fxb ) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area
if __name__ == "__main__":

    def f( x : int ):
        return x**3 + x**2

    print("""f(x) = x^3 + x^2""")
    print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
    while i <= 10_00_00:
        print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
        i *= 10
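    # Closed-form check: f changes sign at x = -1, so the unsigned area is
    # -∫_{-5}^{-1} f dx + ∫_{-1}^{5} f dx = 1376/12 + 2376/12 = 938/3 ≈ 312.67,
    # which the approximations printed above approach as the step count grows.
    print(f'''exact area: {938 / 3}''')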
| 658 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 658 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] =(CMStochasticIterativeScheduler,)
UpperCamelCase_ : Any =10
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[Any] = 10
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Dict = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps[0]
UpperCamelCase :Union[str, Any] = scheduler.timesteps[1]
UpperCamelCase :str = self.dummy_sample
UpperCamelCase :List[str] = 0.1 * sample
UpperCamelCase :List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :str = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ) -> List[str]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[Any] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = scheduler.timesteps
UpperCamelCase :Union[str, Any] = torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = self.dummy_model()
UpperCamelCase :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase :List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :Tuple = pred_prev_sample
UpperCamelCase :Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Dict = self.scheduler_classes[0]
UpperCamelCase :Optional[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = scheduler.timesteps
UpperCamelCase :int = torch.manual_seed(0 )
UpperCamelCase :str = self.dummy_model()
UpperCamelCase :List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase :List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase :int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase :Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase :int = pred_prev_sample
UpperCamelCase :Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase :int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :Tuple = self.get_scheduler_config()
UpperCamelCase :List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :List[str] = self.scheduler_classes[0]
UpperCamelCase :List[Any] = self.get_scheduler_config()
UpperCamelCase :Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = [39, 30, 12, 1, 0]
UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Optional[int] = self.scheduler_classes[0]
UpperCamelCase :List[str] = self.get_scheduler_config()
UpperCamelCase :Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}.''' , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
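# A minimal standalone sketch of the sampling loop the tests above exercise
# (scale input -> predict residual -> step), using a zero tensor as a
# stand-in for a real denoising model; config values mirror the test config.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])  # two-step schedule, as in the test

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(model_input)  # stand-in for a UNet call
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample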
| 658 | 1 |