code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(number):
    """Return True iff `number`'s decimal digits are exactly 1..9, each used once.

    Fixes the mangled original, whose body referenced the undefined name
    `UpperCamelCase__` instead of its own parameter, and restores the name the
    call sites in this file actually use (`is_9_pandigital`).
    """
    digits = str(number)
    # Exactly nine digits and the digit multiset is {1..9} (no 0, no repeats).
    return len(digits) == 9 and set(digits) == set("123456789")
def solution():
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    For a 4-digit base `b`, concat(b, 2*b) == b * 100002 (since 2*b has five
    digits); for a 3-digit base, concat(b, 2*b, 3*b) == b * 1002003. Searching
    each range downwards, the first pandigital hit is the maximum.

    Fixes the mangled original, which stored the candidate into the throwaway
    name `a_` and then returned the undefined name `candidate`. The digit check
    is inlined as a private helper so the function is self-contained.

    Returns:
        int | None: the largest pandigital concatenated product, or None if
        no candidate in the searched ranges qualifies.
    """

    def _is_9_pandigital(num):
        # True iff num's digits are exactly 1..9, each once.
        digits = str(num)
        return len(digits) == 9 and set(digits) == set("123456789")

    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # NOTE(review): `solution` is the Euler solver defined above; the obfuscation
    # pass renamed that definition, so verify the name resolves before running.
    print(F"""{solution() = }""")
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers T(n) = n*(n+1)/2.
# The solver below looks this up by the name TRIANGULAR_NUMBERS; the original
# bound it only to the obfuscated name `lowercase`, which is kept as an alias
# for backward compatibility.
TRIANGULAR_NUMBERS = [n * (n + 1) // 2 for n in range(1, 101)]
lowercase = TRIANGULAR_NUMBERS
def solution():
    """Project Euler 42: count the triangle words in `words.txt`.

    A word's value is the sum of its letters' alphabetical positions
    (A=1 ... Z=26, i.e. ord(ch) - 64 for upper-case input); a word is a
    "triangle word" when that value is a triangular number.

    Fixes the mangled original: `os.path.realpath(lowercase__)` referenced an
    undefined name (should be `__file__`), the inner sum called
    `ord(lowercase__)` instead of `ord` on the loop variable, and the
    comprehension compared against the undefined `TRIANGULAR_NUMBERS`. A local
    set of the first 100 triangular numbers is used for O(1) membership tests.

    Returns:
        int: number of triangle words in words.txt (located next to this file).
    """
    triangular_numbers = {n * (n + 1) // 2 for n in range(1, 101)}
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_path = os.path.join(script_dir, "words.txt")
    with open(words_path) as f:
        line = f.readline()
    # File format: one line of comma-separated, double-quoted upper-case words.
    words = [word.strip('"') for word in line.strip("\r\n").split(",")]
    word_values = (sum(ord(ch) - 64 for ch in word) for word in words)
    return sum(1 for value in word_values if value in triangular_numbers)
if __name__ == "__main__":
    # Print the number of triangle words found in words.txt (Project Euler 42).
    # NOTE(review): `solution` must resolve to the solver defined above; the
    # obfuscation pass renamed that definition.
    print(solution())
| 41
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase = logging.get_logger(__name__)
class UpperCAmelCase ( __a):
    """
    Kaldi-style log-mel filter-bank ("fbank") speech feature extractor with
    optional utterance-level cepstral mean/variance normalization (CMVN).

    NOTE(review): this block is machine-mangled. The base class `__a` and the
    `UpperCAmelCase__` names used in every method body are undefined in this
    file; every parameter is named `lowerCAmelCase_` (duplicate argument names
    are a SyntaxError) while the bodies read the real names (`num_mel_bins`,
    `waveform`, `raw_speech`, ...); and `np.floataa` / `np.intaa` look like
    corrupted `np.float32` / `np.int32`. Restore from the original
    Speech2Text-style feature extractor before running.
    """

    # Array names this extractor emits from __call__ / pad.
    __magic_name__ : List[Any] = ["input_features", "attention_mask"]

    def __init__( self , lowerCAmelCase_=8_0 , lowerCAmelCase_=1_6_0_0_0 , lowerCAmelCase_=8_0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> Optional[int]:
        """Store feature-extraction settings (mel bins, sampling rate, CMVN flags)."""
        super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__)
        a_ =num_mel_bins
        a_ =do_ceptral_normalize
        a_ =normalize_means
        a_ =normalize_vars
        # always return an attention mask for this extractor
        a_ =True

    def lowercase_ ( self , lowerCAmelCase_ , ) -> np.ndarray:
        """Compute Kaldi-compliant fbank features for one waveform (numpy in, numpy out)."""
        a_ =waveform * (2**1_5)  # Kaldi compliance: 16-bit signed integers
        a_ =torch.from_numpy(UpperCAmelCase__).unsqueeze(0)
        a_ =ta_kaldi.fbank(UpperCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def lowercase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 0.0 , ) -> np.ndarray:
        """
        Utterance-level CMVN: subtract the mean and/or divide by the std of the
        first `input_length` frames, then overwrite frames past `input_length`
        with `padding_value`.
        """
        if normalize_means:
            a_ =x[:input_length].mean(axis=0)
            a_ =np.subtract(UpperCAmelCase__ , UpperCAmelCase__)
        if normalize_vars:
            a_ =x[:input_length].std(axis=0)
            a_ =np.divide(UpperCAmelCase__ , UpperCAmelCase__)
        if input_length < x.shape[0]:
            # reset padded frames so normalization doesn't leak into padding
            a_ =padding_value
        # make sure array is in float32
        a_ =x.astype(np.floataa)
        return x

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[np.ndarray]:
        """Apply utterance CMVN to each feature array, deriving lengths from the attention mask."""
        a_ =attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(UpperCAmelCase__ , UpperCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value)
            for x, n in zip(UpperCAmelCase__ , UpperCAmelCase__)
        ]

    def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> BatchFeature:
        """
        Featurize and pad one or more raw mono waveforms into a `BatchFeature`.

        Pipeline: sampling-rate sanity check -> normalize the input container to
        a batch of float arrays -> fbank per waveform -> pad -> optional CMVN ->
        optional tensor conversion.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        # 2-D numpy input means an already-batched set of mono waveforms
        a_ =isinstance(UpperCAmelCase__ , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        a_ =is_batched_numpy or (
            isinstance(UpperCAmelCase__ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            a_ =[np.asarray(UpperCAmelCase__ , dtype=np.floataa) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray):
            a_ =np.asarray(UpperCAmelCase__ , dtype=np.floataa)
        elif isinstance(UpperCAmelCase__ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            a_ =raw_speech.astype(np.floataa)
        # always return batch
        if not is_batched:
            a_ =[raw_speech]
        # extract fbank features
        a_ =[self._extract_fbank_features(UpperCAmelCase__) for waveform in raw_speech]
        # convert into correct format for padding
        a_ =BatchFeature({"input_features": features})
        a_ =self.pad(
            UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        # make sure list is in array format
        a_ =padded_inputs.get("input_features")
        if isinstance(input_features[0] , UpperCAmelCase__):
            a_ =[np.asarray(UpperCAmelCase__ , dtype=np.floataa) for feature in input_features]
        a_ =padded_inputs.get("attention_mask")
        if attention_mask is not None:
            a_ =[np.asarray(UpperCAmelCase__ , dtype=np.intaa) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            a_ =(
                np.array(UpperCAmelCase__ , dtype=np.intaa)
                if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            a_ =self.normalize(
                padded_inputs["input_features"] , attention_mask=UpperCAmelCase__)
        if return_tensors is not None:
            a_ =padded_inputs.convert_to_tensors(UpperCAmelCase__)
        return padded_inputs
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

# Substring replacements mapping Bark (nanoGPT-style) checkpoint key fragments
# to the corresponding HF BarkModel parameter-name fragments; applied in order
# when fixing up state-dict keys in _load_model.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub location of each suno/bark checkpoint, keyed by "<model_type>[_small]".
REMOTE_MODEL_PATHS = {
    "text_small": {"repo_id": "suno/bark", "file_name": "text.pt"},
    "coarse_small": {"repo_id": "suno/bark", "file_name": "coarse.pt"},
    "fine_small": {"repo_id": "suno/bark", "file_name": "fine.pt"},
    "text": {"repo_id": "suno/bark", "file_name": "text_2.pt"},
    "coarse": {"repo_id": "suno/bark", "file_name": "coarse_2.pt"},
    "fine": {"repo_id": "suno/bark", "file_name": "fine_2.pt"},
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# Same cache location the `bark` package uses, so checkpoints are shared.
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path for the given Bark sub-model checkpoint.

    Fixes the mangled original, whose signature declared the same parameter
    name twice (a SyntaxError) and whose name did not match its call site.

    Args:
        model_type: one of "text", "coarse", "fine".
        use_small: when True, select the "<model_type>_small" checkpoint.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download `file_name` from the given Hub repo into CACHE_DIR.

    Fixes the mangled original (duplicate parameter names, name not matching
    the `_download` call site).
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load one suno/bark checkpoint and convert it into the matching HF model.

    Fixes the mangled original: duplicate parameter names (SyntaxError), every
    assignment written to the throwaway name `a_` while later lines read the
    real locals, and the name not matching the `_load_model` call site.

    Args:
        ckpt_path: local path of the original bark checkpoint (downloaded if absent).
        device: torch device string used for `map_location` and the final `.to()`.
        use_small: select the small checkpoint variant.
        model_type: "text", "coarse" or "fine" — picks the HF model/config classes.

    Returns:
        The converted HF sub-model in eval mode on `device`.

    Raises:
        NotImplementedError: for an unknown `model_type`.
        ValueError: when the converted state dict has unexpected extra/missing keys.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints expose a single "vocab_size" that maps
    # onto both the input and output vocab sizes of the HF config
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    # ".attn.bias" buffers are causal-mask artifacts and intentionally ignored
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""")
    if len(missing_keys) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""")

    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss""")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, verify it against the original, and save it.

    Fixes the mangled original (duplicate parameter names, `a_ =` assignments
    that discarded every value, name not matching the call in the main guard).

    Args:
        pytorch_dump_folder_path: output directory for `save_pretrained`.
        use_small: convert the small checkpoint variant.
        model_type: "text", "coarse" or "fine".

    Raises:
        NotImplementedError: for an unknown `model_type`.
        ValueError: when parameter counts or sample outputs diverge between the
            original bark model and the converted HF model.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble previously-converted sub-models into one BarkModel and save/push it.

    Fixes the mangled original (six parameters all named `lowercase__`, which is
    a SyntaxError, and `a_ =` assignments discarding every loaded object).

    Args:
        semantic_path / coarse_path / fine_path: folders containing the already
            converted HF sub-models (each with a config.json).
        append_text: suffix appended to `folder_path` for the output directory.
        hub_path: Hub repo id used when pushing the assembled model.
        folder_path: base output directory.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # Fixes the mangled original, which bound the parser and the parsed args to
    # the single name `lowercase` while the following lines read `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __a , unittest.TestCase):
    """
    Tokenizer tests for LED (slow and fast tokenizers), using a tiny
    hand-written BPE vocab for unit tests plus the released
    `allenai/led-base-16384` checkpoint for batch-encoding tests.

    NOTE(review): this block is machine-mangled. The mixin base `__a` and the
    `A__` name used in every method body are undefined in this file, and
    several `lowerCAmelCase_` parameters do not match the names their bodies
    read (`kwargs`, `max_length`, ...). Restore from the original
    TokenizerTesterMixin-based test file before running.
    """

    # classes under test and "a fast tokenizer exists" flag for the mixin
    __magic_name__ : Any = LEDTokenizer
    __magic_name__ : Dict = LEDTokenizerFast
    __magic_name__ : Union[str, Any] = True

    def lowercase_ ( self) -> int:
        """Write a minimal BPE vocab/merges pair into the test tmp dir."""
        super().setUp()
        a_ =[
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        a_ =dict(zip(A__ , range(len(A__))))
        a_ =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        a_ ={"unk_token": "<unk>"}
        a_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        a_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as fp:
            fp.write(json.dumps(A__) + "\n")
        with open(self.merges_file , "w" , encoding="utf-8") as fp:
            fp.write("\n".join(A__))

    def lowercase_ ( self , **lowerCAmelCase_) -> int:
        """Build a slow tokenizer from the tmp-dir vocab with the test special tokens."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **A__)

    def lowercase_ ( self , **lowerCAmelCase_) -> List[Any]:
        """Build a fast tokenizer from the tmp-dir vocab with the test special tokens."""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A__)

    def lowercase_ ( self , lowerCAmelCase_) -> Dict:
        """Return an (input, expected output) text pair for the mixin."""
        return "lower newer", "lower newer"

    @cached_property
    def lowercase_ ( self) -> str:
        """Slow tokenizer from the released LED checkpoint."""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def lowercase_ ( self) -> Optional[int]:
        """Fast tokenizer from the released LED checkpoint."""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def lowercase_ ( self) -> str:
        """Batch encoding returns pt tensors of the expected ids/shape."""
        a_ =["A long paragraph for summarization.", "Another paragraph for summarization."]
        a_ =[0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ =tokenizer(A__ , max_length=len(A__) , padding=A__ , return_tensors="pt")
            self.assertIsInstance(A__ , A__)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            a_ =batch.input_ids.tolist()[0]
            self.assertListEqual(A__ , A__)

    @require_torch
    def lowercase_ ( self) -> Any:
        """Encoding without targets yields no labels / decoder mask."""
        a_ =["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ =tokenizer(A__ , padding=A__ , return_tensors="pt")
            self.assertIn("input_ids" , A__)
            self.assertIn("attention_mask" , A__)
            self.assertNotIn("labels" , A__)
            self.assertNotIn("decoder_attention_mask" , A__)

    @require_torch
    def lowercase_ ( self) -> Optional[Any]:
        """Target texts respect max_length padding."""
        a_ =[
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ =tokenizer(text_target=A__ , max_length=3_2 , padding="max_length" , return_tensors="pt")
            self.assertEqual(3_2 , targets["input_ids"].shape[1])

    @require_torch
    def lowercase_ ( self) -> List[str]:
        """Very long input is truncated to the model max (5122 for LED-base)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ =tokenizer(
                ["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=A__ , truncation=A__ , return_tensors="pt")
            self.assertIsInstance(A__ , A__)
            self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2))

    @require_torch
    def lowercase_ ( self) -> Union[str, Any]:
        """Both inputs and targets start with BOS and end with EOS."""
        a_ =["A long paragraph for summarization."]
        a_ =[
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ =tokenizer(A__ , return_tensors="pt")
            a_ =tokenizer(text_target=A__ , return_tensors="pt")
            a_ =inputs["input_ids"]
            a_ =targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def lowercase_ ( self) -> int:
        """pad() preserves a caller-supplied global_attention_mask."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ =["Summary of the text.", "Another summary."]
            a_ =[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            a_ =tokenizer(A__ , padding=A__)
            a_ =[[0] * len(A__) for x in encoded_output["input_ids"]]
            a_ =tokenizer.pad(A__)
            self.assertSequenceEqual(outputs["global_attention_mask"] , A__)

    def lowercase_ ( self) -> Tuple:
        """Intentionally skipped mixin hook."""
        pass

    def lowercase_ ( self) -> Dict:
        """Slow and fast tokenizers agree on text containing a <mask> token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                a_ =self.rust_tokenizer_class.from_pretrained(A__ , **A__)
                a_ =self.tokenizer_class.from_pretrained(A__ , **A__)
                a_ ="A, <mask> AllenNLP sentence."
                a_ =tokenizer_r.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__)
                a_ =tokenizer_p.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__)
                self.assertEqual(sum(tokens_r["token_type_ids"]) , sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]) , sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]) , )
                a_ =tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                a_ =tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
                self.assertSequenceEqual(
                    A__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    A__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
| 721
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(number):
    """Return True iff `number`'s decimal digits are exactly 1..9, each used once.

    Restores the name the call sites in this snippet use (`is_9_pandigital`);
    the mangled original shadowed every function in the file under one name.
    """
    digits = str(number)
    # Exactly nine digits and the digit multiset is {1..9} (no 0, no repeats).
    return len(digits) == 9 and set(digits) == set("123456789")
def solution():
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    concat(b, 2*b) == b * 100002 for 4-digit b, and concat(b, 2*b, 3*b)
    == b * 1002003 for 3-digit b; scanning downwards returns the maximum.

    Fixes the mangled original, which stored each candidate into the throwaway
    name `a_` and then returned the undefined name `candidate`. The pandigital
    check is inlined so this function stands alone.

    Returns:
        int | None: the largest pandigital concatenated product, or None.
    """

    def _is_9_pandigital(num):
        # True iff num's digits are exactly 1..9, each once.
        digits = str(num)
        return len(digits) == 9 and set(digits) == set("123456789")

    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # NOTE(review): `solution` is the Euler solver defined above; the obfuscation
    # pass renamed that definition, so verify the name resolves before running.
    print(F"""{solution() = }""")
| 41
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase :
    """
    Test helper that builds tiny BitConfig/inputs and runs shape assertions for
    BitModel, BitForImageClassification and BitBackbone.

    NOTE(review): mangled — other blocks in this file instantiate this class as
    `BitModelTester`, every __init__ parameter is named `lowerCAmelCase_`
    (duplicate argument names are a SyntaxError) while the body reads `parent`,
    `batch_size`, etc., and `a_ =` assignments discard their values.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=3 , lowerCAmelCase_=3_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_0 , lowerCAmelCase_=[8, 1_6, 3_2, 6_4] , lowerCAmelCase_=[1, 1, 2, 1] , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=3 , lowerCAmelCase_=None , lowerCAmelCase_=["stage2", "stage3", "stage4"] , lowerCAmelCase_=[2, 3, 4] , lowerCAmelCase_=1 , ) -> List[Any]:
        """Record the test hyper-parameters (tiny model for speed)."""
        a_ =parent
        a_ =batch_size
        a_ =image_size
        a_ =num_channels
        a_ =embeddings_size
        a_ =hidden_sizes
        a_ =depths
        a_ =is_training
        a_ =use_labels
        a_ =hidden_act
        a_ =num_labels
        a_ =scope
        a_ =len(lowerCAmelCase_)
        a_ =out_features
        a_ =out_indices
        a_ =num_groups

    def lowercase_ ( self) -> Optional[int]:
        """Return (config, pixel_values, labels) with random tensors."""
        a_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a_ =None
        if self.use_labels:
            a_ =ids_tensor([self.batch_size] , self.num_labels)
        a_ =self.get_config()
        return config, pixel_values, labels

    def lowercase_ ( self) -> Optional[int]:
        """Build the tiny BitConfig under test."""
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
        """BitModel forward: last hidden state has shape (B, C_last, H/32, W/32)."""
        a_ =BitModel(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[str]:
        """BitForImageClassification forward: logits shape (B, num_labels)."""
        a_ =self.num_labels
        a_ =BitForImageClassification(lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_ , labels=lowerCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Union[str, Any]:
        """BitBackbone forward: feature maps/channels, with and without out_features."""
        a_ =BitBackbone(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        a_ =None
        a_ =BitBackbone(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])

    def lowercase_ ( self) -> List[str]:
        """Return (config, inputs_dict) for the common-test mixin."""
        a_ =self.prepare_config_and_inputs()
        a_ , a_ , a_ =config_and_inputs
        a_ ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __a , __a , unittest.TestCase):
    """
    Common model tests for the Bit family (model, classifier, backbone).

    NOTE(review): mangled — the mixin bases `__a` are undefined in this file,
    `a_ =` assignments discard their values, and the tester is instantiated as
    `BitModelTester` although the helper class above lost that name.
    """

    # model classes / pipeline mapping under test, plus disabled common checks
    __magic_name__ : Union[str, Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    __magic_name__ : Dict = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    __magic_name__ : Dict = False
    __magic_name__ : Union[str, Any] = False
    __magic_name__ : Optional[Any] = False
    __magic_name__ : Union[str, Any] = False
    __magic_name__ : Any = False

    def lowercase_ ( self) -> Optional[int]:
        """Set up the shared model tester and config tester."""
        a_ =BitModelTester(self)
        a_ =ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_)

    def lowercase_ ( self) -> List[str]:
        """Run the standard BitConfig round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowercase_ ( self) -> List[str]:
        """No common config properties to check for Bit."""
        return

    @unittest.skip(reason="Bit does not output attentions")
    def lowercase_ ( self) -> Optional[Any]:
        """Skipped: convnet, no attention outputs."""
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def lowercase_ ( self) -> Optional[int]:
        """Skipped: no inputs_embeds for image models."""
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def lowercase_ ( self) -> Dict:
        """Skipped: no token embeddings."""
        pass

    def lowercase_ ( self) -> Optional[Any]:
        """forward() signature starts with `pixel_values`."""
        a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ =model_class(lowerCAmelCase_)
            a_ =inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a_ =[*signature.parameters.keys()]
            a_ =["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase_)

    def lowercase_ ( self) -> Union[str, Any]:
        """Base model forward shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase_)

    def lowercase_ ( self) -> str:
        """Backbone forward shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCAmelCase_)

    def lowercase_ ( self) -> Union[str, Any]:
        """Norm layers initialize to weight=1, bias=0."""
        a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ =model_class(config=lowerCAmelCase_)
            for name, module in model.named_modules():
                # NOTE(review): `nn.BatchNormad` is not a torch name — almost
                # certainly a corrupted `nn.BatchNorm2d`; confirm and fix.
                if isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def lowercase_ ( self) -> List[Any]:
        """hidden_states outputs: one per stage (+embedding), H/4 x W/4 first map."""
        def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
            a_ =model_class(lowerCAmelCase_)
            model.to(lowerCAmelCase_)
            model.eval()
            with torch.no_grad():
                a_ =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_))
            a_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            a_ =self.model_tester.num_stages
            self.assertEqual(len(lowerCAmelCase_) , expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
        a_ =["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                a_ =layer_type
                a_ =True
                check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                a_ =True
                check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def lowercase_ ( self) -> int:
        """Skipped: no feedforward chunking in convnets."""
        pass

    def lowercase_ ( self) -> Dict:
        """Classification head forward shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)

    @slow
    def lowercase_ ( self) -> Optional[Any]:
        """Hub round-trip: the first released Bit checkpoint loads."""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a_ =BitModel.from_pretrained(lowerCAmelCase_)
            self.assertIsNotNone(lowerCAmelCase_)
def prepare_img():
    """Load the standard COCO cats fixture image used by the slow integration test.

    Restores the name the integration test below calls (`prepare_img`); the
    mangled original shadowed every function in the file under one name and
    discarded the opened image into the throwaway name `a_`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
    """
    Slow integration test: run the released Bit classifier on a fixture image
    and compare the first logits against reference values.

    NOTE(review): mangled — `lowerCAmelCase_` is read as a device in the bodies
    but is never defined, and `a_ =` assignments discard their values.
    """

    @cached_property
    def lowercase_ ( self) -> Dict:
        """Image processor matching the checkpoint under test (None without vision)."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def lowercase_ ( self) -> Dict:
        """Forward the fixture image and check logits shape and first values."""
        a_ =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowerCAmelCase_)
        a_ =self.default_image_processor
        a_ =prepare_img()
        a_ =image_processor(images=lowerCAmelCase_ , return_tensors="pt").to(lowerCAmelCase_)
        # forward pass
        with torch.no_grad():
            a_ =model(**lowerCAmelCase_)
        # verify the logits
        a_ =torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
        a_ =torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(lowerCAmelCase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4))
@require_torch
class UpperCAmelCase ( __a , unittest.TestCase):
    """
    Backbone-specific tests for BitBackbone, driven by BackboneTesterMixin.

    NOTE(review): the mixin base `__a` is undefined in this file, and the
    setUp body discards the tester into the throwaway name `a_`.
    """

    # backbone class / config class under test, plus a disabled mixin flag
    __magic_name__ : List[str] = (BitBackbone,) if is_torch_available() else ()
    __magic_name__ : Any = BitConfig
    __magic_name__ : Optional[int] = False

    def lowercase_ ( self) -> Union[str, Any]:
        """Instantiate the shared model tester."""
        a_ =BitModelTester(self)
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
    """Shared test mixin for diffusers UNet sub-blocks.

    Subclasses are expected to provide `block_class`, `block_type`
    ("down"/"mid"/"up") and the expected `output_slice` values.
    """

    @property
    def lowercase_ ( self) -> Any:
        # Default dummy input for the block under test.
        return self.get_dummy_input()

    @property
    def lowercase_ ( self) -> List[str]:
        # Expected output shape per block kind: down-blocks halve the spatial
        # dims, mid-blocks keep them, up-blocks double them.
        # NOTE(review): this property shadows the one above (same name).
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    # NOTE(review): the four keyword parameters below all share the name
    # `lowerCAmelCase_` (a SyntaxError as written); the body reads
    # include_temb / include_res_hidden_states_tuple /
    # include_encoder_hidden_states / include_skip_sample, which were
    # presumably the original names.
    def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
        # Build a deterministic (seeded) dict of tensors for the block forward;
        # the optional entries are gated by the boolean flags.
        a_ =4
        a_ =3_2
        a_ =(3_2, 3_2)
        a_ =torch.manual_seed(0)
        a_ =torch.device(lowerCAmelCase_)
        a_ =(batch_size, num_channels) + sizes
        a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        a_ ={"hidden_states": hidden_states}
        if include_temb:
            # timestep-embedding input
            a_ =1_2_8
            a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        if include_res_hidden_states_tuple:
            # residual skip connections consumed by up-blocks (separate seed)
            a_ =torch.manual_seed(1)
            a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
        if include_encoder_hidden_states:
            # cross-attention context
            a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
        if include_skip_sample:
            a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        return dummy_input

    def lowercase_ ( self) -> Optional[int]:
        # Common constructor kwargs; up-blocks additionally need a
        # prev_output_channel, mid-blocks take no out_channels.
        a_ ={
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        if self.block_type == "up":
            a_ =3_2
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        a_ =self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self , lowerCAmelCase_) -> Dict:
        # Forward the block in eval mode and compare a corner slice of the
        # output against the expected values (tolerance 5e-3).
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        unet_block.to(lowerCAmelCase_)
        unet_block.eval()
        with torch.no_grad():
            a_ =unet_block(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            # some blocks return a tuple; keep only the sample tensor
            a_ =output[0]
        self.assertEqual(output.shape , self.output_shape)
        a_ =output[0, -1, -3:, -3:]
        a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
        assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def lowercase_ ( self) -> Optional[Any]:
        # Smoke-test backprop: forward in train mode, MSE against random
        # noise, then backward.
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.train()
        a_ =model(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ =output[0]
        a_ =torch.device(lowerCAmelCase_)
        a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
        a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
        loss.backward()
| 41
| 0
|
class UpperCAmelCase :
    """Edit (Levenshtein) distance between two strings, solved two ways:
    top-down memoized recursion and bottom-up tabulation.

    Fixes applied: the three public methods previously all shared one name
    (so only the last survived), and both input words were stored under the
    same attribute, making the algorithm compare a word with itself.
    """

    def __init__( self) -> None:
        # The two words being compared and the shared memo/DP table.
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp( self , m , n) -> int:
        """Memoized recursion on the last indices (m, n) of worda/wordb."""
        if m == -1:
            return n + 1  # worda exhausted: insert the remaining n + 1 chars
        elif n == -1:
            return m + 1  # wordb exhausted: delete the remaining m + 1 chars
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                # matching tail characters cost nothing
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert_cost = self.__min_dist_top_down_dp(m, n - 1)
                delete_cost = self.__min_dist_top_down_dp(m - 1, n)
                replace_cost = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert_cost, delete_cost, replace_cost)
        return self.dp[m][n]

    def min_dist_top_down( self , worda , wordb) -> int:
        """Return the edit distance between *worda* and *wordb* (memoized)."""
        self.worda = worda
        self.wordb = wordb
        # -1 marks "not computed yet" in the memo table.
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up( self , worda , wordb) -> int:
        """Return the edit distance between *worda* and *wordb* (tabulation)."""
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        # dp[i][j] = edit distance between worda[:i] and wordb[:j]
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert_cost = self.dp[i][j - 1]
                    delete_cost = self.dp[i - 1][j]
                    replace_cost = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert_cost, delete_cost, replace_cost)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = UpperCAmelCase()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    Sa = input("Enter the first string: ").strip()
    Sb = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 701
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    """Pretty-print the shortest distance from vertex *src* to every vertex."""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if one extra relaxation round still improves a distance,
    i.e. the graph contains a reachable negative-weight cycle."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Single-source shortest paths; negative edge weights are allowed.

    Each edge is a dict with keys "src", "dst", "weight". Relaxes every edge
    vertex_count - 1 times, then raises Exception("Negative cycle found")
    if a negative cycle is detected. Returns the distance list.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
):
    """Return sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim).

    Half of the channels are sin, half are cos, over `embedding_dim // 2`
    geometrically spaced frequencies between min_timescale and max_timescale.
    `flip_sin_to_cos` puts the cos half first; `scale` multiplies the phases.
    (Restored: the original body referenced an undefined placeholder name
    where `num_timescales` / `timesteps` / the frequency vector belonged,
    and was defined under a name its caller never used.)
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    # outer product: (timesteps, 1) * (1, num_timescales)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(signal)[0], embedding_dim])
    return signal
class UpperCAmelCase ( nn.Module):
    """Flax MLP that projects a timestep embedding: Dense -> SiLU -> Dense,
    both layers of width `time_embed_dim`.

    Fixes applied: both dataclass attributes previously shared one name while
    the body read `self.time_embed_dim` / `self.dtype`; `jnp.floataa` is not a
    real dtype (restored to jnp.float32); the body referenced an undefined
    placeholder instead of the `temb` parameter.
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__( self , temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class UpperCAmelCase ( nn.Module):
    """Flax module producing sinusoidal timestep embeddings of width `dim`.

    Fixes applied: the three dataclass attributes previously shared one name
    while the body read `self.dim` / `self.flip_sin_to_cos` / `self.freq_shift`.
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__( self , timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
# Minimal DreamBooth inference example: load a fine-tuned Stable Diffusion
# checkpoint and generate one image for the training prompt.
# NOTE(review): every assignment below binds the same name `lowercase`, yet the
# later lines read `model_id`, `pipe`, `prompt` and `image` — none of which are
# ever bound, and `torch.floataa` is not a real torch dtype (presumably
# torch.float16 in the original snippet). Confirm against the original script.
lowercase = '''path-to-your-trained-model'''
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
# 50 denoising steps, classifier-free guidance scale 7.5
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy weights from an original roberta-prelayernorm hub checkpoint into
    the transformers RobertaPreLayerNorm model and save model + tokenizer to
    *pytorch_dump_folder_path*.

    (Restored: the body referenced an undefined placeholder name for both
    parameters and dropped the state-dict key assignments.)
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"])

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. (Restored: `parser` and `args` were never bound — both
    # assignments went to an unrelated name — so the script raised NameError.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import machinery for the vision-encoder-decoder model family: the
# config is always importable; each framework's model is registered only when
# that framework is installed.
# (Restored: every dict/list here was bound to one throwaway name, each
# assignment overwriting the last, while the _LazyModule call below read the
# undefined `_import_structure`.)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors _import_structure above.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
def solution(limit=28123):
    """Project Euler 23: sum of all positive integers <= limit that cannot be
    written as the sum of two abundant numbers.

    (Restored: the sieve list, the abundant set and the accumulator were all
    bound to one placeholder name while later lines read `sum_divs`,
    `abundants` and `res`, and the set was fed the placeholder instead of n.)
    """
    # sum_divs[n] accumulates the proper-divisor sum of n; every n >= 1 has
    # divisor 1, and each pair (i, k) with i*k <= limit contributes i + k.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        # abundants holds every abundant number <= n, which covers all
        # possible summands of n.
        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer constants.
# (Restored: all four values were bound to one throwaway name, each assignment
# overwriting the last, while the tokenizer class below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.)
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji JSON file.

    Returns (vocab, raw_vocab, ids_to_tokens, emoji):
      - vocab: token -> id (every surface form of a multi-form line maps to
        the same id),
      - raw_vocab: raw comma-joined line -> id,
      - ids_to_tokens: id -> list of surface forms,
      - emoji: the parsed emoji JSON.

    (Restored: both parameters previously shared one name — a SyntaxError —
    and the dict-filling assignments were collapsed to a placeholder.)
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A line is either a single token, or a comma-separated list of surface
    # forms sharing one id ("," itself stays a single token).
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
    """GPT-NeoX-Japanese tokenizer: thin wrapper around SubWordJapaneseTokenizer.

    NOTE(review): the base class `__a` is undefined in this file (presumably
    PreTrainedTokenizer, imported above), the four class attributes all share
    one name, `__init__` declares every parameter under the same name (a
    SyntaxError as written), and many `a_ =` placeholders drop the names that
    later lines read (`self.vocab`, `self.subword_tokenizer`, ...). Confirm
    against the original before relying on this class.
    """

    __magic_name__ : Optional[int] = VOCAB_FILES_NAMES
    __magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : str = ["input_ids", "attention_mask"]

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
        """Load the vocab/emoji files and build the subword tokenizer."""
        super().__init__(
            unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        a_ =do_clean_text
        a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
        a_ =SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)

    @property
    def lowercase_ ( self) -> int:
        # Vocab size = number of raw vocabulary lines.
        return len(self.raw_vocab)

    def lowercase_ ( self) -> Optional[Any]:
        # Full vocabulary including tokens added after loading.
        return dict(self.raw_vocab , **self.added_tokens_encoder)

    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        # Tokenize via the subword tokenizer, optionally cleaning the text.
        return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)

    def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
        # Token -> id, falling back to the unknown-token id.
        return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))

    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        # Id -> token string.
        return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        # Join token strings back into plain text.
        a_ ="".join(lowerCAmelCase_).strip()
        return out_string

    def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
        # Encode a Conversation turn-by-turn, appending EOS after each turn
        # and truncating to the model's maximum input length from the left.
        a_ =[]
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
        if len(lowerCAmelCase_) > self.model_max_length:
            a_ =input_ids[-self.model_max_length :]
        return input_ids

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
        # Write the vocabulary and emoji files to a directory (or to prefixed
        # file paths when the target is not a directory).
        a_ =0
        if os.path.isdir(lowerCAmelCase_):
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    # Ids should be consecutive; warn if the vocab has holes.
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    a_ =token_index
                writer.write(",".join(lowerCAmelCase_) + "\n")
                index += 1
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            json.dump(self.emoji , lowerCAmelCase_)
        return vocab_file, emoji_file
class UpperCAmelCase ( __a):
    """Japanese subword tokenizer with emoji support and text normalization.

    NOTE(review): `__init__` declares all three parameters under one name (a
    SyntaxError as written), and the six distinct content regexes below are
    all assigned to the same attribute (`content_repattera`) so each
    assignment overwrites the last — in the original they were presumably
    repatter1..repatter6 (URL, email, telephone, two date forms, price), used
    in that order by the cleaning method. Confirm before relying on this.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Store vocab/id maps and precompile the normalization patterns."""
        a_ =vocab  # same as swe
        a_ =ids_to_tokens  # same as bpe
        a_ =emoji
        # Longest vocabulary entry; bounds the greedy match window in tokenize.
        a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
        # URL pattern
        a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        # e-mail pattern
        a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        # telephone-number pattern
        a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        # Gregorian date pattern
        a_ =re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        # Japanese-era date pattern
        a_ =re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        # price/amount pattern
        a_ =re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        # Box-drawing and block-element characters are folded into <BLOCK>.
        a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__( self) -> Tuple:
        # Number of distinct token ids.
        return len(self.ids_to_tokens)

    def lowercase_ ( self , lowerCAmelCase_) -> Any:
        # Replace URLs/emails/phones/dates/prices with tag tokens, fold box
        # characters to <BLOCK> and collapse repeated <BLOCK> tags.
        a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
        a_ =content.translate(self.content_transa)
        while "<BLOCK><BLOCK>" in content:
            a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
        return content

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
        # Tokenize: normalize whitespace/dashes/emoji, optionally clean, then
        # greedily match the longest vocabulary entry at each position; fall
        # back to <KIGOU>/<U2000U2BFF>/byte tokens for out-of-vocab chars.
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace("\r\n" , "<BR>")
        a_ =text.replace("\n" , "<BR>")
        a_ =text.replace("\r" , "<BR>")
        a_ =text.replace("\t" , "<TAB>")
        a_ =text.replace("—" , "ー")
        a_ =text.replace("−" , "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
        if clean:
            a_ =self.clean_text(lowerCAmelCase_)

        def check_simbol(lowerCAmelCase_):
            # True for 2-byte UTF-8 symbols in selected ranges.
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
                a_ =(int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checkuae(lowerCAmelCase_):
            # True for 3-byte UTF-8 chars in the U+2000..U+2BFF area.
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
                a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False

        a_ =0
        a_ =[]
        while pos < len(lowerCAmelCase_):
            # Candidate window: up to maxlen chars, or 3 for "<...>" tags.
            a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            a_ =[] # (token_id, token, pos)
            for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
                a_ =text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(lowerCAmelCase_) > 2:
                        # special tag token: take it outright
                        a_ =[(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(lowerCAmelCase_) > 0:
                # the smallest token_id is adopted
                a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
                result.append(lowerCAmelCase_)
                a_ =e
            else:
                # no vocab match: classify the single character
                a_ =pos + 1
                a_ =text[pos:end]
                if check_simbol(lowerCAmelCase_):
                    result.append("<KIGOU>")
                elif checkuae(lowerCAmelCase_):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                a_ =end
        return result

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
        # Convert a token id back to text, decoding pending byte tokens and
        # expanding special tags (<SP>, <BR>, <TAB>, emoji, ...).
        a_ =[]
        a_ =[]
        a_ =self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(lowerCAmelCase_) > 0:
                words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
                a_ =[]
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(lowerCAmelCase_)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(lowerCAmelCase_)
        if len(lowerCAmelCase_) > 0:
            words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
        a_ ="".join(lowerCAmelCase_)
        return text
| 41
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase ( unittest.TestCase):
    """Fixture holder for the Beit image-processor tests: stores the config
    knobs and renders them as a kwargs dict.

    NOTE(review): the test class below instantiates this as
    `BeitImageProcessingTester`, a name not defined here, and `__init__`
    declares every parameter under one name (a SyntaxError as written); the
    body reads size / crop_size / parent / batch_size / ... which were
    presumably the original parameter names.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=1_8 , lowerCAmelCase_=3_0 , lowerCAmelCase_=4_0_0 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[0.5, 0.5, 0.5] , lowerCAmelCase_=[0.5, 0.5, 0.5] , lowerCAmelCase_=False , ) -> Union[str, Any]:
        """Record the processing configuration used by the tests."""
        a_ =size if size is not None else {"height": 2_0, "width": 2_0}
        a_ =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
        a_ =parent
        a_ =batch_size
        a_ =num_channels
        a_ =image_size
        a_ =min_resolution
        a_ =max_resolution
        a_ =do_resize
        a_ =size
        a_ =do_center_crop
        a_ =crop_size
        a_ =do_normalize
        a_ =image_mean
        a_ =image_std
        a_ =do_reduce_labels

    def lowercase_ ( self) -> Tuple:
        # Kwargs for constructing a BeitImageProcessor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    (Restored: the dataset handle was bound to a placeholder name while the
    following lines read `dataset`, and the function's name did not match the
    `prepare_semantic_single_inputs` its caller uses.)
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    seg_map = Image.open(dataset[1]["file"])

    return image, seg_map
def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures.

    (Restored: the dataset handle was bound to a placeholder name while the
    following lines read `ds`, and the function's name did not match the
    `prepare_semantic_batch_inputs` its caller uses.)
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    imagea = Image.open(ds[0]["file"])
    mapa = Image.open(ds[1]["file"])
    imageb = Image.open(ds[2]["file"])
    mapb = Image.open(ds[3]["file"])

    return [imagea, imageb], [mapa, mapb]
@require_torch
@require_vision
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =BeitImageProcessingTester(self)
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize"))
self.assertTrue(hasattr(lowerCAmelCase_ , "size"))
self.assertTrue(hasattr(lowerCAmelCase_ , "do_center_crop"))
self.assertTrue(hasattr(lowerCAmelCase_ , "center_crop"))
self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(lowerCAmelCase_ , "image_std"))
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 2_0, "width": 2_0})
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8})
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase_)
a_ =self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=lowerCAmelCase_)
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2})
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4})
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase_)
def lowercase_ ( self) -> str:
"""simple docstring"""
pass
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_)
a_ =[]
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
a_ =image_processing(image_inputs[0] , maps[0] , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 2_5_5)
# Test batched
a_ =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 2_5_5)
# Test not batched input (PIL images)
a_ , a_ =prepare_semantic_single_inputs()
a_ =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 2_5_5)
# Test batched input (PIL images)
a_ , a_ =prepare_semantic_batch_inputs()
a_ =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 2_5_5)
    def lowercase_ ( self) -> int:
        """Segmentation-label range test: ADE20k labels stay within [0, 150] by
        default and within [0, 255] once label reduction is enabled (background
        id 0 is remapped to the ignore value 255).

        NOTE(review): identifiers here were obfuscated — results are bound to a
        throwaway `a_` while the assertions read `encoding`, which is never
        defined by the visible code. TODO confirm against the original test.
        """
        a_ =self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        a_ , a_ =prepare_semantic_single_inputs()
        a_ =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 1_5_0)
        # NOTE(review): `a_ =True` presumably stood for
        # `image_processing.do_reduce_labels = True` before obfuscation — as
        # written it has no effect on the processor. TODO confirm.
        a_ =True
        a_ =image_processing(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 2_5_5)
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger (the obfuscated file bound this to `lowercase`; alias kept).
lowercase = logging.get_logger(__name__)
# Variant name -> Keras constructor. The obfuscated file bound this dict to
# `lowercase` (immediately shadowed below) and mapped every variant to a
# nonexistent `EfficientNetBa`; the functions in this module read it as
# `model_classes`, and keras.applications exposes EfficientNetB0..B7.
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
# Variant name -> architecture hyper-parameters; read throughout this module as
# `CONFIG_MAP`, which the obfuscated file left undefined.
CONFIG_MAP = {
    '''b0''': {
        '''hidden_dim''': 1_280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.0,
        '''image_size''': 224,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [],
    },
    '''b1''': {
        '''hidden_dim''': 1_280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.1,
        '''image_size''': 240,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [16],
    },
    '''b2''': {
        '''hidden_dim''': 1_408,
        '''width_coef''': 1.1,
        '''depth_coef''': 1.2,
        '''image_size''': 260,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 8, 16],
    },
    '''b3''': {
        '''hidden_dim''': 1_536,
        '''width_coef''': 1.2,
        '''depth_coef''': 1.4,
        '''image_size''': 300,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 18],
    },
    '''b4''': {
        '''hidden_dim''': 1_792,
        '''width_coef''': 1.4,
        '''depth_coef''': 1.8,
        '''image_size''': 380,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [6],
    },
    '''b5''': {
        '''hidden_dim''': 2_048,
        '''width_coef''': 1.6,
        '''depth_coef''': 2.2,
        '''image_size''': 456,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [13, 27],
    },
    '''b6''': {
        '''hidden_dim''': 2_304,
        '''width_coef''': 1.8,
        '''depth_coef''': 2.6,
        '''image_size''': 528,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [31],
    },
    '''b7''': {
        '''hidden_dim''': 2_560,
        '''width_coef''': 2.0,
        '''depth_coef''': 3.1,
        '''image_size''': 600,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [18],
    },
}
def UpperCAmelCase_ ( lowercase__ ):
    '''Build an `EfficientNetConfig` for the given variant name (e.g. "b0").

    The obfuscated version rebound every value to a throwaway `a_` and then
    read undefined names (`model_name`, `idalabel`, `config`), raising
    NameError; working names are restored below.

    Args:
        lowercase__: variant key into `CONFIG_MAP` ("b0".."b7").
    Returns:
        A populated `EfficientNetConfig`.
    '''
    config = EfficientNetConfig()
    # Variant-specific hyper-parameters.
    # NOTE(review): attribute names follow the upstream conversion script
    # (width_coefficient/depth_coefficient/...) — confirm against
    # EfficientNetConfig.
    config.hidden_dim = CONFIG_MAP[lowercase__]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[lowercase__]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[lowercase__]["depth_coef"]
    config.image_size = CONFIG_MAP[lowercase__]["image_size"]
    config.dropout_rate = CONFIG_MAP[lowercase__]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[lowercase__]["dw_padding"]
    # ImageNet-1k label mapping fetched from the hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_0_0_0
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}  # JSON keys arrive as strings
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def UpperCAmelCase_ ( ):
    '''Download the standard COCO smoke-test image (two cats on a couch).

    The obfuscated body passed an undefined `lowercase__` for both the URL and
    the `stream` flag in this zero-argument function; both are restored.
    '''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def UpperCAmelCase_ ( lowercase__ ):
    '''Build the `EfficientNetImageProcessor` for variant `lowercase__`.

    The obfuscated body indexed `CONFIG_MAP` with an undefined `model_name` and
    returned an undefined `preprocessor`; working names restored.
    '''
    size = CONFIG_MAP[lowercase__]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} ,
        image_mean=[0.485, 0.456, 0.406] ,
        image_std=[0.47853944, 0.4732864, 0.47434163] ,
        # NOTE(review): the obfuscated code passed an undefined name here; the
        # upstream conversion script disables center cropping — confirm.
        do_center_crop=False ,
    )
    return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
    '''Build the TF-name -> HF-name parameter mapping for EfficientNet.

    Args:
        lowercase__: list of the original TF variable names.
    Returns:
        dict mapping each TF variable name present in `lowercase__` (plus the
        classifier head) to its HuggingFace state-dict key.

    The obfuscated version read undefined names (`original_param_names`,
    `block_names`, `key_mapping`) and silently discarded the classifier
    mappings; working names are restored below.
    '''
    original_param_names = lowercase__
    # TF block ids, e.g. "1a" out of "block1a_expand_conv/kernel:0".
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    # TF block id -> sequential HF block index (as string).
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
        rename_keys.append(
            (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
        rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
        rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
        rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
        rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
        rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
        rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
        rename_keys.append(
            (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
        rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
        rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        # Only map variables actually present in the checkpoint; backbone keys
        # live under the "efficientnet." prefix of the HF model.
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    # Classification head lives outside the backbone prefix.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def UpperCAmelCase_ ( hf_params , tf_params , key_mapping ):
    '''Copy TF weights into the HF state dict in place.

    NOTE(review): the obfuscated signature repeated `lowercase__` three times,
    which is a SyntaxError in Python; parameter names restored from the
    upstream conversion script (hf_params, tf_params, key_mapping) — the only
    visible caller passes them positionally.

    Args:
        hf_params: HF ``state_dict`` whose tensors are overwritten in place.
        tf_params: dict of TF variable name -> numpy array.
        key_mapping: TF name -> HF state-dict key (see the mapping builder).
    '''
    for key, value in tf_params.items():
        # Normalization stats without a mapped destination are skipped.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernels: TF HWIO -> PyTorch OIHW.
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            # Depthwise kernels: TF HWIM -> PyTorch (I*M)1HW layout.
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # Dense kernels: transpose (in, out) -> (out, in).
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    '''Convert a Keras EfficientNet checkpoint to HuggingFace format, verify the
    logits match, and optionally save / push the result.

    NOTE(review): this definition is broken by obfuscation — the four
    parameters share one name (`lowercase__`), which is a SyntaxError, and the
    body reads names never defined here (`model_classes`, `original_model`,
    `tf_params`, `get_efficientnet_config`, `rename_keys`, `replace_params`,
    `convert_image_processor`, `prepare_img`, `hf_model`, `preprocessor`,
    `outputs`, `image_size`, `save_model`, `push_to_hub`, `model_name`).
    Before obfuscation the parameters were presumably
    (model_name, pytorch_dump_folder_path, save_model, push_to_hub) — confirm
    against the original script.
    '''
    # Instantiate the original Keras model with ImageNet weights.
    a_ =model_classes[model_name](
        include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
    # Collect all TF variables (trainable and not) into one name->array dict.
    a_ =original_model.trainable_variables
    a_ =original_model.non_trainable_variables
    a_ ={param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        a_ =param.numpy()
    a_ =list(tf_params.keys() )
    # Load HuggingFace model
    a_ =get_efficientnet_config(lowercase__ )
    a_ =EfficientNetForImageClassification(lowercase__ ).eval()
    a_ =hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    a_ =rename_keys(lowercase__ )
    replace_params(lowercase__ , lowercase__ , lowercase__ )
    # Initialize preprocessor and preprocess input image
    a_ =convert_image_processor(lowercase__ )
    a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        a_ =hf_model(**lowercase__ )
    a_ =outputs.logits.detach().numpy()
    # Original model inference
    a_ =False
    a_ =CONFIG_MAP[model_name]["image_size"]
    a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    a_ =image.img_to_array(lowercase__ )
    a_ =np.expand_dims(lowercase__ , axis=0 )
    a_ =original_model.predict(lowercase__ )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(lowercase__ ):
            os.mkdir(lowercase__ )
        # Save converted model and image processor
        hf_model.save_pretrained(lowercase__ )
        preprocessor.save_pretrained(lowercase__ )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        a_ =F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(lowercase__ )
        hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
    # The obfuscated version bound the parser and the parsed args to
    # `lowercase` and then read the undefined `parser`/`args`, and called an
    # undefined `convert_efficientnet_checkpoint`; at this point in the module
    # `UpperCAmelCase_` is bound to the checkpoint converter defined directly
    # above, so it is invoked instead.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''b0''',
        type=str,
        help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''hf_model''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
    UpperCAmelCase_(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase ( _UpperCamelCase):
    '''Tests for the REALM retriever: builds a tiny wordpiece vocab and an
    in-memory block-record store, then exercises retrieval, answer-span
    labelling, and save/load round-trips.

    NOTE(review): identifiers in this file were mechanically obfuscated — many
    results are bound to a throwaway `a_` and then read through `__a`, which is
    name-mangled (`_UpperCAmelCase__a`) and never defined, so most methods
    would raise NameError at runtime. The base `_UpperCamelCase` is likewise
    undefined here (presumably `TestCase` from the imports above). Comments
    below describe the apparent intent; confirm against the original test.
    '''
    def lowercase_ ( self) -> Optional[Any]:
        """setUp: create a temp dir, write a minimal REALM tokenizer vocab under
        `<tmp>/realm_tokenizer`, and create `<tmp>/realm_block_records`."""
        a_ =tempfile.mkdtemp()
        a_ =5
        # Realm tok
        a_ =[
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        a_ =os.path.join(self.tmpdirname , "realm_tokenizer")
        os.makedirs(__a , exist_ok=__a)
        a_ =os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        a_ =os.path.join(self.tmpdirname , "realm_block_records")
        os.makedirs(__a , exist_ok=__a)
    def lowercase_ ( self) -> RealmTokenizer:
        """Load the RealmTokenizer written to disk by setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer"))
    def lowercase_ ( self) -> Dict:
        """tearDown: remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname)
    def lowercase_ ( self) -> List[str]:
        """Return a minimal RealmConfig with this test's block-record count."""
        a_ =RealmConfig(num_block_records=self.num_block_records)
        return config
    def lowercase_ ( self) -> int:
        """Return a tiny two-row QA dataset (id / question / answers)."""
        a_ =Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            })
        return dataset
    def lowercase_ ( self) -> str:
        """Return the dummy evidence blocks as a numpy array of byte strings."""
        a_ =np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ] , dtype=__a , )
        return block_records
    def lowercase_ ( self) -> Optional[Any]:
        """Build a RealmRetriever over the dummy block records and tokenizer."""
        a_ =RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def lowercase_ ( self) -> Tuple:
        """Retrieve two blocks for one question and check the concatenated
        question+evidence encodings (shapes and token sequences)."""
        a_ =self.get_config()
        a_ =self.get_dummy_retriever()
        a_ =retriever.tokenizer
        a_ =np.array([0, 3] , dtype="long")
        a_ =tokenizer(["Test question"]).input_ids
        a_ =tokenizer(
            ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
        a_ =config.reader_seq_len
        a_ =retriever(
            __a , __a , answer_ids=__a , max_length=__a , return_tensors="np")
        self.assertEqual(len(__a) , 2)
        self.assertEqual(len(__a) , 2)
        self.assertEqual(len(__a) , 2)
        self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0))
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0))
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0))
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def lowercase_ ( self) -> List[Any]:
        """Check answer-span labelling (has_answers / start / end positions) for
        retrieved blocks, including a block truncated by max_length."""
        a_ =self.get_config()
        a_ =self.get_dummy_retriever()
        a_ =retriever.tokenizer
        a_ =np.array([0, 3, 5] , dtype="long")
        a_ =tokenizer(["Test question"]).input_ids
        a_ =tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
        a_ =config.reader_seq_len
        a_ =retriever(
            __a , __a , answer_ids=__a , max_length=__a , return_tensors="np")
        self.assertEqual([False, True, True] , __a)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a)
    def lowercase_ ( self) -> List[str]:
        """Save the retriever's block records, then reload from a local path and
        from a mocked hub download; both must yield the same first record."""
        a_ =self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records"))
        # Test local path
        a_ =retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records"))
        self.assertEqual(retriever.block_records[0] , b"This is the first record")
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            a_ =os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records") , _REALM_BLOCK_RECORDS_FILENAME)
            a_ =RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
            self.assertEqual(retriever.block_records[0] , b"This is the first record")
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Sentencepiece word-boundary marker. The obfuscated file bound it to
# `lowercase` and immediately shadowed it, leaving `SPIECE_UNDERLINE` — which
# the test class below references — undefined.
SPIECE_UNDERLINE = '''▁'''
# Path to the tiny sentencepiece fixture model used by the tests.
lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase):
    '''BigBird tokenizer tests (slow sentencepiece tokenizer vs. fast Rust
    tokenizer parity, plus slow hub integration checks).

    NOTE(review): identifiers here were mechanically obfuscated — results are
    bound to a throwaway `a_` while assertions read `_snake_case`, which is
    never defined, and the mixin base `UpperCAmelCase_` is likewise undefined
    in this file (presumably `TokenizerTesterMixin`). Comments describe the
    apparent intent; confirm against the original test.
    '''
    __magic_name__ : str = BigBirdTokenizer
    __magic_name__ : Any = BigBirdTokenizerFast
    __magic_name__ : List[str] = True
    __magic_name__ : Dict = True
    def lowercase_ ( self) -> int:
        """setUp: build a tokenizer from the sentencepiece fixture and save it
        into the mixin's temp dir."""
        super().setUp()
        a_ =self.tokenizer_class(_snake_case , keep_accents=_snake_case)
        tokenizer.save_pretrained(self.tmpdirname)
    def lowercase_ ( self) -> List[Any]:
        """"<s>" should round-trip through token<->id conversion as id 1."""
        a_ ="<s>"
        a_ =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
    def lowercase_ ( self) -> Optional[Any]:
        """Spot-check the vocab: first/second keys, the final "[MASK]" token,
        and the total size of 1004."""
        a_ =list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "<unk>")
        self.assertEqual(vocab_keys[1] , "<s>")
        self.assertEqual(vocab_keys[-1] , "[MASK]")
        self.assertEqual(len(_snake_case) , 1_0_0_4)
    def lowercase_ ( self) -> List[Any]:
        """The base sentencepiece vocab size is 1000."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0)
    def lowercase_ ( self) -> Tuple:
        """Python and Rust tokenizers must tokenize/encode identically."""
        if not self.test_rust_tokenizer:
            return
        a_ =self.get_tokenizer()
        a_ =self.get_rust_tokenizer()
        a_ ="I was born in 92000, and this is falsé."
        a_ =tokenizer.tokenize(_snake_case)
        a_ =rust_tokenizer.tokenize(_snake_case)
        self.assertListEqual(_snake_case , _snake_case)
        a_ =tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
        a_ =rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
        self.assertListEqual(_snake_case , _snake_case)
        a_ =self.get_rust_tokenizer()
        a_ =tokenizer.encode(_snake_case)
        a_ =rust_tokenizer.encode(_snake_case)
        self.assertListEqual(_snake_case , _snake_case)
    def lowercase_ ( self) -> Optional[Any]:
        """Full tokenization round-trip on the fixture model: tokens, ids, and
        ids-back-to-tokens (unknown pieces become "<unk>")."""
        a_ =BigBirdTokenizer(_snake_case , keep_accents=_snake_case)
        a_ =tokenizer.tokenize("This is a test")
        self.assertListEqual(_snake_case , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_snake_case) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
        a_ =tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            _snake_case , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        a_ =tokenizer.convert_tokens_to_ids(_snake_case)
        self.assertListEqual(
            _snake_case , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
        a_ =tokenizer.convert_ids_to_tokens(_snake_case)
        self.assertListEqual(
            _snake_case , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
    @cached_property
    def lowercase_ ( self) -> int:
        """Hub tokenizer used by the slow integration tests below."""
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def lowercase_ ( self) -> Optional[Any]:
        """Integration: encode "Hello World!" against known ids."""
        a_ ="Hello World!"
        a_ =[6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
        self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case))
    @slow
    def lowercase_ ( self) -> Union[str, Any]:
        """Integration: encode a long sentence with punctuation and OOV words
        against known ids."""
        a_ =(
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        a_ =[6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
        # fmt: on
        self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case))
    @require_torch
    @slow
    def lowercase_ ( self) -> Optional[Any]:
        """Integration: encoded batches must be accepted by a small BigBirdModel
        forward pass without error."""
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        a_ =list(self.big_tokenizer.get_vocab().keys())[:1_0]
        a_ =" ".join(_snake_case)
        a_ =self.big_tokenizer.encode_plus(_snake_case , return_tensors="pt" , return_token_type_ids=_snake_case)
        a_ =self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=_snake_case)
        a_ =BigBirdConfig(attention_type="original_full")
        a_ =BigBirdModel(_snake_case)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**_snake_case)
            model(**_snake_case)
    @slow
    def lowercase_ ( self) -> Dict:
        """Integration: decoding an encoded masked sentence keeps the [MASK]
        special token in place."""
        a_ =BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        a_ =tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def lowercase_ ( self) -> Union[str, Any]:
        """Integration: full encoding (ids + attention masks) against a pinned
        hub revision via the mixin helper."""
        a_ ={"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Declarative import structure for the lazy module: submodule name -> exported
# public names. The obfuscated file bound this dict, the optional model list
# AND the _LazyModule instance all to `lowercase`, so `_import_structure` was
# undefined and the lazy module was never installed.
_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: simply don't advertise the modeling objects.
    pass
else:
    _import_structure['''modeling_timesformer'''] = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in place of this package (standard transformers
    # pattern); the obfuscated file assigned it to `lowercase` instead, which
    # left the package eagerly empty at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
from math import factorial
def UpperCAmelCase_ ( lowercase__ = 2_0 ) -> int:
    '''Project Euler 15: count lattice paths through an n x n grid, i.e. the
    central binomial coefficient C(2n, n).

    Fixes: the obfuscated body read undefined names (`n`, `k`), and the
    original formula used float division + int(), which loses precision for
    large n; exact integer division is used instead. The return annotation
    (`List[Any]`) was also wrong — the result is an int.

    Args:
        lowercase__: grid side length n (default 20, the Euler problem).
    Returns:
        C(2n, n) as an exact int.
    '''
    n = 2 * lowercase__  # middle entry of row 2n of Pascal's triangle
    k = n // 2
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    import sys
    # The obfuscated guard bound the parsed argument to `lowercase` and then
    # called undefined names (`solution`, `n`); `UpperCAmelCase_` is the
    # lattice-path solver defined directly above.
    if len(sys.argv) == 1:
        print(UpperCAmelCase_(20))
    else:
        try:
            n = int(sys.argv[1])
            print(UpperCAmelCase_(n))
        except ValueError:
            print('''Invalid entry - please enter a number.''')
| 708
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
    '''Infinite Fibonacci generator yielding 1, 2, 3, 5, 8, 13, ...

    The obfuscated body assigned both tuple targets to the same throwaway
    `a_` and then read undefined `a`/`b`; the classic two-variable form is
    restored.
    '''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
    '''Project Euler 25: index of the first Fibonacci term with `lowercase__`
    decimal digits.

    Fixes: the obfuscated body called `next()` on the integer parameter and
    read undefined names (`fibonacci_generator`, `n`, `answer`). The sibling
    generator in this file shares the name `UpperCAmelCase_` and is shadowed
    by this definition, so an equivalent generator is nested here instead.
    '''
    def _fib():
        # Yields 1, 2, 3, 5, 8, ... (same sequence as the module's generator).
        a, b = 0, 1
        while True:
            a, b = b, a + b
            yield b

    answer = 1
    gen = _fib()
    while len(str(next(gen ) ) ) < lowercase__:
        answer += 1
    return answer + 1
if __name__ == "__main__":
    # `solution` was undefined in the obfuscated guard; at this point in the
    # module `UpperCAmelCase_` is bound to the digit-count solver above.
    print(UpperCAmelCase_(int(str(input()).strip())))
| 41
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class UpperCAmelCase ( UpperCamelCase__):
    '''Output container for the VAE encode pass.

    NOTE(review): in upstream diffusers this is `AutoencoderKLOutput` (a
    `BaseOutput`) with a single field
    `latent_dist: DiagonalGaussianDistribution`; obfuscation collapsed the
    field name to `__magic_name__` and the annotation to `Dict = 42` — confirm
    against the original before relying on the field.
    '''
    __magic_name__ : Dict = 42
class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__):
'''simple docstring'''
__magic_name__ : Any = True
@register_to_config
def __init__( self , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = ("DownEncoderBlock2D",) , lowerCAmelCase_ = ("UpDecoderBlock2D",) , lowerCAmelCase_ = (6_4,) , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "silu" , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 3_2 , lowerCAmelCase_ = 3_2 , lowerCAmelCase_ = 0.1_8_2_1_5 , ) -> Any:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
a_ =Encoder(
in_channels=_a , out_channels=_a , down_block_types=_a , block_out_channels=_a , layers_per_block=_a , act_fn=_a , norm_num_groups=_a , double_z=_a , )
# pass init params to Decoder
a_ =Decoder(
in_channels=_a , out_channels=_a , up_block_types=_a , block_out_channels=_a , layers_per_block=_a , norm_num_groups=_a , act_fn=_a , )
a_ =nn.Convad(2 * latent_channels , 2 * latent_channels , 1)
a_ =nn.Convad(_a , _a , 1)
a_ =False
a_ =False
# only relevant if vae tiling is enabled
a_ =self.config.sample_size
a_ =(
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple))
else self.config.sample_size
)
a_ =int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
a_ =0.2_5
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> List[str]:
"""simple docstring"""
if isinstance(_a , (Encoder, Decoder)):
a_ =value
def lowercase_ ( self , lowerCAmelCase_ = True) -> Any:
"""simple docstring"""
a_ =use_tiling
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
self.enable_tiling(_a)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =True
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
a_ ={}
def fn_recursive_add_processors(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
if hasattr(_a , "set_processor"):
a_ =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , _a , _a)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a)
return processors
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ =len(self.attn_processors.keys())
if isinstance(_a , _a) and len(_a) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(_a)} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")
def fn_recursive_attn_processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
if hasattr(_a , "set_processor"):
if not isinstance(_a , _a):
module.set_processor(_a)
else:
module.set_processor(processor.pop(f"""{name}.processor"""))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , _a , _a)
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a)
    def lowercase_ ( self) -> Union[str, Any]:
        """Reset every attention layer to the default `AttnProcessor`.

        NOTE(review): `set_attn_processor` does not exist as an attribute on
        this obfuscated class (every method was renamed to `lowercase_`); in
        upstream diffusers this call resolves to the processor-setting method
        defined above.
        """
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_a , return_dict=_a)
if self.use_slicing and x.shape[0] > 1:
a_ =[self.encoder(_a) for x_slice in x.split(1)]
a_ =torch.cat(_a)
else:
a_ =self.encoder(_a)
a_ =self.quant_conv(_a)
a_ =DiagonalGaussianDistribution(_a)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_a , return_dict=_a)
a_ =self.post_quant_conv(_a)
a_ =self.decoder(_a)
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a)
@apply_forward_hook
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = True) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
a_ =[self._decode(_a).sample for z_slice in z.split(1)]
a_ =torch.cat(_a)
else:
a_ =self._decode(_a).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_a)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ =min(a.shape[2] , b.shape[2] , _a)
for y in range(_a):
a_ =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def blend_h(self, a, b, blend_extent) -> "torch.Tensor":
    """Horizontally blend tile ``b`` into the right-most columns of tile ``a``.

    Mirror of ``blend_v`` along the width dimension; ``b`` is modified in
    place and returned.
    """
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    return b
def tiled_encode(self, x, return_dict=True) -> "AutoencoderKLOutput":
    """Encode a large image by splitting it into overlapping tiles.

    Each tile is encoded independently, adjacent tiles are cross-faded with
    ``blend_v``/``blend_h`` to hide seams, and the blended latents are
    concatenated back into one tensor before building the posterior.
    """
    overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
    blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
    row_limit = self.tile_latent_min_size - blend_extent

    # Split the image into overlapping tiles and encode them separately.
    rows = []
    for i in range(0, x.shape[2], overlap_size):
        row = []
        for j in range(0, x.shape[3], overlap_size):
            tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
            tile = self.encoder(tile)
            tile = self.quant_conv(tile)
            row.append(tile)
        rows.append(row)

    result_rows = []
    for i, row in enumerate(rows):
        result_row = []
        for j, tile in enumerate(row):
            # Blend the tile above and the tile to the left into the current
            # tile, then crop away the overlap before stitching.
            if i > 0:
                tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
            if j > 0:
                tile = self.blend_h(row[j - 1], tile, blend_extent)
            result_row.append(tile[:, :, :row_limit, :row_limit])
        result_rows.append(torch.cat(result_row, dim=3))

    moments = torch.cat(result_rows, dim=2)
    posterior = DiagonalGaussianDistribution(moments)
    if not return_dict:
        return (posterior,)
    return AutoencoderKLOutput(latent_dist=posterior)
def tiled_decode(self, z, return_dict=True) -> "Union[DecoderOutput, torch.FloatTensor]":
    """Decode a large latent by splitting it into overlapping tiles.

    Each latent tile is decoded independently; overlaps are cross-faded with
    ``blend_v``/``blend_h`` to avoid seams, then the cropped tiles are
    concatenated back into one image tensor.
    """
    overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
    blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
    row_limit = self.tile_sample_min_size - blend_extent

    # Split z into overlapping tiles and decode them separately.
    rows = []
    for i in range(0, z.shape[2], overlap_size):
        row = []
        for j in range(0, z.shape[3], overlap_size):
            tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
            tile = self.post_quant_conv(tile)
            decoded = self.decoder(tile)
            row.append(decoded)
        rows.append(row)

    result_rows = []
    for i, row in enumerate(rows):
        result_row = []
        for j, tile in enumerate(row):
            # Blend with the tile above and the tile to the left, then crop.
            if i > 0:
                tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
            if j > 0:
                tile = self.blend_h(row[j - 1], tile, blend_extent)
            result_row.append(tile[:, :, :row_limit, :row_limit])
        result_rows.append(torch.cat(result_row, dim=3))

    dec = torch.cat(result_rows, dim=2)
    if not return_dict:
        return (dec,)
    return DecoderOutput(sample=dec)
def forward(self, sample, sample_posterior=False, return_dict=True, generator=None) -> "Union[DecoderOutput, torch.FloatTensor]":
    """Full autoencoder pass: encode ``sample`` and decode it back.

    When ``sample_posterior`` is True the latent is sampled from the posterior
    (optionally with the given ``generator``); otherwise its mode is used.
    """
    x = sample
    posterior = self.encode(x).latent_dist
    if sample_posterior:
        z = posterior.sample(generator=generator)
    else:
        z = posterior.mode()
    dec = self.decode(z).sample
    if not return_dict:
        return (dec,)
    return DecoderOutput(sample=dec)
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase(__a):
    """Configuration for Switch Transformers models.

    Mirrors the T5 configuration plus mixture-of-experts router
    hyper-parameters (number of experts, capacity, router dtype, ...).
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # Every how many encoder layers a sparse (MoE) layer is inserted.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # Every how many decoder layers a sparse (MoE) layer is inserted.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # "gated-gelu" style activations are encoded as "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazily populated import structure: maps submodule name -> public names.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def get_results(output_dir):
    """Load ``all_results.json`` from *output_dir* and return it as a dict.

    Raises:
        ValueError: if the results file does not exist.
    """
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase(__a):
    """End-to-end TPU smoke tests driven through ``xla_spawn``."""

    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        # Swap in our CLI args and time the whole run.
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0  # 1 for manhattan, 0 for euclidean

# 0 = free cell, 1 = obstacle.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A board position expressed as (y, x).
TPosition = tuple[int, int]
class Node:
    """A search node: position, goal, path cost ``g`` and heuristic ``h``."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, else euclidean distance."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Ordering by total cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """Plain A* search over the module-level ``grid`` using ``delta`` moves."""

    def __init__(self, start, goal) -> None:
        # Positions arrive as (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> "list[TPosition]":
        """Run A*; return the path to the target, or ``[start]`` if unreachable."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (lowest f-cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever copy of the node has the better g-cost.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent) -> "list[Node]":
        """Return the in-bounds, non-obstacle neighbours of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent))
        return successors

    def retrace_path(self, node) -> "list[TPosition]":
        """Walk parent links back to the start and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Two A* searches running towards each other from start and goal."""

    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> "list[TPosition]":
        """Expand both frontiers in lockstep until they meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each search aims at the other search's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # Keep the copy of the node with the better g-cost.
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> "list[TPosition]":
        """Join the forward path and the reversed backward path at the meet point."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the shared meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase(__a):
    """ALBERT model configuration (vocabulary, layer sizes, dropout, ...)."""

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class UpperCAmelCase(__a):
    """ONNX export configuration for ALBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 41
| 0
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True iff *number* is prime (6k +/- 1 trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield the primes 2, 3, 5, ... indefinitely using ``is_prime``."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2000000) -> int:
    """Project Euler 10: sum of all primes strictly below *n*."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums=None):
    """Return the maximum contiguous-subarray sum of *nums* (Kadane).

    Raises:
        ValueError: if *nums* is None or empty.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either extend the running sum or restart at the current element.
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazily populated import structure: maps submodule name -> public names.
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713
|
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: 1-based line number of the ``base,exp`` pair with
    the greatest value, comparing via ``exp * log10(base)``.
    """
    largest = 0
    result = 0
    # Resolve the data file relative to this script, not the cwd.
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), data_file)) as f:
        for i, line in enumerate(f):
            a, x = map(int, line.split(","))
            if x * log10(a) > largest:
                largest = x * log10(a)
                result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
| 41
| 0
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase(UpperCamelCase__, unittest.TestCase):
    """Tokenization tests for OpenAI GPT (slow and fast tokenizers)."""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        # The tokenizer has no pad token, so padding requests must fail.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length")

                # Pair input tests
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length")

    def test_padding_different_model_input_name(self):
        # Covered by the common tests; nothing model-specific to add here.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase ( UpperCamelCase__):
    """Variant of the tokenization test suite that additionally requires
    ftfy and spacy; all test cases are inherited from the mixin."""
    pass
| 714
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) with a*x + b*y == gcd(a, b) (extended Euclid)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the x with x = r1 (mod n1) and x = r2 (mod n2).

    Assumes n1 and n2 are coprime; uses extended Euclid to find the Bezout
    coefficients.
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n (a and n coprime)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as ``chinese_remainder_theorem`` but built on ``invert_modulo``."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
# Run all doctests for the CRT helpers when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='''chinese_remainder_theorem''', verbose=True)
    testmod(name='''chinese_remainder_theorem2''', verbose=True)
    testmod(name='''invert_modulo''', verbose=True)
    testmod(name='''extended_euclid''', verbose=True)
| 41
| 0
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True iff *graph* (adjacency list keyed 0..n-1) is bipartite.

    Two-colours each connected component with DFS, then verifies that no edge
    joins two vertices of the same colour.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
lowercase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: "np.ndarray") -> bool:
    """Return True iff *matrix* equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: "np.ndarray", v: "np.ndarray"):
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix *a* and column vector *v*."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ =np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
# Run the doctests and the module's self-checks when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 41
| 0
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT mode: requests hang (simulated) or time out."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS mode: every request raises a ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1 mode: HTTP helpers must refuse to connect."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 716
|
'''simple docstring'''
from __future__ import annotations
# Completed boards found by solve(); printed as they are discovered.
solution = []
def is_safe(board, row, column):
    """Return True iff a queen at (row, column) is not attacked.

    Checks the row, the column, and both upward diagonals (rows below
    *row* are still empty during backtracking).
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board, row):
    """Place queens row by row with backtracking.

    Each complete placement is appended to the module-level ``solution``
    list and printed via ``printboard``.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False
def printboard(board):
    """Print the board: 'Q' for a queen, '.' for an empty cell."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 41
| 0
|
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 iff both inputs are 0 (logical NOR), else 0."""
    return int(input_1 == input_2 == 0)
def main() -> None:
    """Print the NOR gate truth table."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
# Run doctests, then print the truth table, when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k fillings for the single ``<mask>`` in *masked_input*.

    Each result is a ``(filled_sentence, probability, predicted_token)``
    triple, ordered by descending probability.
    """
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece marks word starts with U+2581; map it back to a space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
# Demo: fill the masked token of a French sentence with CamemBERT.
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()

masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCAmelCase(__snake_case):
    """MobileNetV2 model configuration."""

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase ( OnnxConfig):
    """ONNX export configuration for MobileNetV2.

    NOTE(review): reconstructed from a machine-garbled original — the base
    class name ``__snake_case`` was undefined (restored to ``OnnxConfig``,
    imported above), and all three properties shared the same placeholder
    name so only the last survived; restored to the ``OnnxConfig`` property
    names ``inputs`` / ``outputs`` / ``atol_for_validation``.
    """

    __magic_name__ : Union[str, Any] = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[int, str]:
        """The model consumes a single batched image tensor."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[int, str]:
        """Classification exports emit logits; other tasks emit hidden states."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating ONNX vs PyTorch outputs."""
        return 1e-4
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Submodule -> public names exported from it; extended below once the
# optional backends are confirmed available.
# NOTE(review): restored the ``_import_structure`` binding — the garbled
# original bound the dict (and the backend lists) to a throwaway name while
# reading ``_import_structure`` at the bottom.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access.
    # NOTE(review): restored ``sys.modules[__name__] = ...``; the garbled
    # original dropped the assignment target, so laziness never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer–Moore substring search using the bad-character heuristic.

    NOTE(review): reconstructed from a machine-garbled original — the
    constructor duplicated one placeholder parameter name (a SyntaxError)
    and never stored the text/pattern on ``self``, and all methods shared
    one placeholder name. The class and method names are grounded in the
    demo below (``BoyerMooreSearch(text, pattern)`` /
    ``bms.bad_character_heuristic()``) and in the ``self.*`` reads.
    """

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern
        is aligned at ``current_pos``, or -1 if the whole window matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        """Return every alignment at which the pattern matches the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


# Backward-compatible alias for the previous (garbled) class name.
UpperCAmelCase = BoyerMooreSearch
# Demo: report every index at which ``pattern`` occurs in ``text``.
# NOTE(review): the bindings below all target ``lowercase`` while later
# lines read ``text`` / ``pattern`` / ``bms`` / ``positions`` — the binding
# names look machine-garbled; verify against the original script.
lowercase = 'ABAABA'
lowercase = 'AB'
lowercase = BoyerMooreSearch(text, pattern)
lowercase = bms.bad_character_heuristic()

if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
# T(n) = n*(n+1)/2 for n = 1..100; the product n*(n+1) is always even, so
# int() truncation is exact here.
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """Project Euler 42: count the triangle words in ``words.txt``.

    A word's value is the sum of the alphabetical positions of its letters
    (A=1, via ``ord(letter) - 64``); the word is a "triangle word" when that
    value appears in ``TRIANGULAR_NUMBERS``.

    Returns:
        int: the number of triangle words in the file.

    NOTE(review): reconstructed from a machine-garbled original — restored
    ``__file__`` (the no-arg function called ``realpath`` on an undefined
    name), ``ord(x)`` (it summed ``ord`` of the undefined name instead of
    each letter), and the ``solution`` name read by the ``__main__`` guard.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    with open(words_file_path) as f:
        words = f.readline()
    # The file is a single line of comma-separated, double-quoted words.
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    triangle_words = [
        word_value
        for word_value in [sum(ord(x) - 6_4 for x in word) for word in words]
        if word_value in TRIANGULAR_NUMBERS
    ]
    return len(triangle_words)


if __name__ == "__main__":
    print(solution())
| 41
| 0
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
    """Shared test mixin for diffusers UNet sub-blocks: builds dummy inputs,
    checks a corner slice of the forward output, and runs one training step.

    NOTE(review): this block appears machine-garbled — every method shares
    the placeholder name ``lowercase_`` (later definitions shadow earlier
    ones) and ``_a`` is referenced but never defined (it stands where the
    original variable/parameter names were). Kept byte-for-byte; comments
    and docstrings only. Verify against the original file before running.
    """

    @property
    def lowercase_ ( self) -> str:
        """Dummy input built with the default options."""
        return self.get_dummy_input()

    @property
    def lowercase_ ( self) -> str:
        """Expected output shape for the configured ``block_type``."""
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(f"""\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.""")

    def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Optional[Any]:
        """Build the kwargs dict fed to the block under test.

        The boolean flags toggle the optional tensors (timestep embedding,
        residual hidden-state tuple, encoder hidden states, skip sample).
        """
        a_ =4
        a_ =3_2
        a_ =(3_2, 3_2)
        # Seeded generator so the randomized inputs are reproducible.
        a_ =torch.manual_seed(0)
        a_ =torch.device(_a)
        a_ =(batch_size, num_channels) + sizes
        a_ =randn_tensor(_a , generator=_a , device=_a)
        a_ ={"hidden_states": hidden_states}
        if include_temb:
            a_ =1_2_8
            a_ =randn_tensor((batch_size, temb_channels) , generator=_a , device=_a)
        if include_res_hidden_states_tuple:
            # Separate seed so residual states differ from hidden_states.
            a_ =torch.manual_seed(1)
            a_ =(randn_tensor(_a , generator=_a , device=_a),)
        if include_encoder_hidden_states:
            a_ =floats_tensor((batch_size, 3_2, 3_2)).to(_a)
        if include_skip_sample:
            a_ =randn_tensor(((batch_size, 3) + sizes) , generator=_a , device=_a)
        return dummy_input

    def lowercase_ ( self) -> Dict:
        """Common constructor kwargs plus matching dummy inputs for the block."""
        a_ ={
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        if self.block_type == "up":
            a_ =3_2
        if self.block_type == "mid":
            # Mid blocks keep their channel count; no out_channels argument.
            init_dict.pop("out_channels")
        a_ =self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self , lowerCAmelCase_) -> Any:
        """Forward pass in eval mode; compare output shape and a corner slice."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**_a)
        unet_block.to(_a)
        unet_block.eval()
        with torch.no_grad():
            a_ =unet_block(**_a)
        if isinstance(_a , _a):
            a_ =output[0]
        self.assertEqual(output.shape , self.output_shape)
        a_ =output[0, -1, -3:, -3:]
        a_ =torch.tensor(_a).to(_a)
        assert torch_all_close(output_slice.flatten() , _a , atol=5e-3)

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def lowercase_ ( self) -> Any:
        """Single training step: forward, MSE loss against noise, backward."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**_a)
        model.to(_a)
        model.train()
        a_ =model(**_a)
        if isinstance(_a , _a):
            a_ =output[0]
        a_ =torch.device(_a)
        a_ =randn_tensor(output.shape , device=_a)
        a_ =torch.nn.functional.mse_loss(_a , _a)
        loss.backward()
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)

# Fixed seed so the randomized output-equivalence check in the conversion is
# reproducible.
set_seed(770)

# Fragments of suno/bark state-dict keys -> their HF Bark layer-name
# counterparts, applied in order during checkpoint fixup.
lowercase = {
    '''c_attn''': '''att_proj''',
    '''c_proj''': '''out_proj''',
    '''c_fc''': '''in_proj''',
    '''transformer.''': '''''',
    '''h.''': '''layers.''',
    '''ln_1''': '''layernorm_1''',
    '''ln_2''': '''layernorm_2''',
    '''ln_f''': '''layernorm_final''',
    '''wpe''': '''position_embeds_layer''',
    '''wte''': '''input_embeds_layer''',
}

# Hub locations of the suno/bark checkpoints, small and full variants, keyed
# by "<model_type>" or "<model_type>_small".
lowercase = {
    '''text_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text.pt''',
    },
    '''coarse_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse.pt''',
    },
    '''fine_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine.pt''',
    },
    '''text''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text_2.pt''',
    },
    '''coarse''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse_2.pt''',
    },
    '''fine''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine_2.pt''',
    },
}

# Local cache directory: $XDG_CACHE_HOME (falling back to ~/.cache) /suno/bark_v0.
# NOTE(review): all three results bind the same name ``lowercase`` while the
# last line reads ``default_cache_dir`` — the binding names look
# machine-garbled; verify against the original script.
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of a suno/bark checkpoint file.

    Args:
        model_type: one of "text", "coarse", "fine".
        use_small: when True, select the "<model_type>_small" variant.

    NOTE(review): reconstructed — the garbled original duplicated one
    placeholder parameter name (a SyntaxError) and joined the file name onto
    the model type instead of the cache directory; the log message in the
    loader below ("downloading into `{CACHE_DIR}`") grounds the
    ``CACHE_DIR`` prefix, and the call site grounds the name/parameters.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download ``file_name`` from the given HF Hub repo into the cache dir.

    NOTE(review): reconstructed — the garbled original duplicated one
    placeholder parameter name (a SyntaxError) and reused it for every
    argument; the call site (``_download(model_info["repo_id"],
    model_info["file_name"])``) grounds the name and parameters, and the
    local target is the module-level ``CACHE_DIR``.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno/bark checkpoint and convert it into an HF Bark sub-model.

    Args:
        ckpt_path: local checkpoint path (downloaded into the cache if absent).
        device: torch device string/object the converted model is moved to.
        use_small: select the small checkpoint variant.
        model_type: "text", "coarse" or "fine".

    Returns:
        The converted HF model in eval mode on ``device``.

    NOTE(review): reconstructed from a machine-garbled original (duplicated
    placeholder parameter names, lost assignment targets); variable names
    are recovered from the reads in the original body and the call site
    ``_load_model(ckpt_path, device, model_type=..., use_small=...)``.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints carry a single "vocab_size" that the
    # HF config splits into input/output vocab sizes.
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]

    # fixup checkpoint: strip the torch.compile prefix and remap layer names.
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""")
    if len(missing_keys) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""")
    # strict=False: the ".attn.bias" buffers filtered above are allowed to differ.
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1E6 , 1)}M params, {round(val_loss , 3)} loss""")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one bark sub-model, verify it against the original suno/bark
    implementation on random inputs, and save it to disk.

    Args:
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        use_small: convert the small checkpoint variant.
        model_type: "text", "coarse" or "fine".

    Raises:
        ValueError: if parameter counts, output shapes, or output values
            diverge between the original and converted model.

    NOTE(review): reconstructed from a machine-garbled original (duplicated
    placeholder parameters, lost assignment targets); the ``__main__`` guard
    below grounds the name and keyword parameters.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0

    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def UpperCAmelCase_(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble a full HF ``BarkModel`` from the three converted sub-models
    plus the Encodec codec, then save it (pushing to ``hub_path``).

    NOTE(review): reconstructed — the garbled original duplicated one
    placeholder parameter name six times (a SyntaxError). The sub-model
    variable names are grounded by the original's reads
    (``semantic.generation_config``, ``coarseAcoustic.generation_config``,
    ``fineAcoustic.generation_config``); the path/hub parameter names and
    the ``push_to_hub=True`` value are best-effort — verify against the
    original conversion script.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # CLI: convert one bark sub-model (text/coarse/fine) to HF format.
    # NOTE(review): the parser/args results below bind ``lowercase`` while
    # later lines read ``parser`` / ``args`` — the binding names look
    # machine-garbled; verify against the original script.
    lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    lowercase = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
# Language-code token ids of the PLBart "base" vocabulary.
# NOTE(review): restored the names — both constants were bound to the same
# throwaway name while being read as ``EN_CODE`` / ``PYTHON_CODE`` in the
# tests below.
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __a , unittest.TestCase):
    """Unit tests for ``PLBartTokenizer`` ("base" and "multi" vocab variants).

    NOTE(review): this block appears machine-garbled — ``__a`` is an
    undefined base (probably ``TokenizerTesterMixin``, imported above), the
    test methods all share the placeholder name ``lowercase_`` (later
    definitions shadow earlier ones), and ``UpperCamelCase__`` is an
    undefined placeholder standing where the original names/arguments were.
    Kept byte-for-byte; comments and docstrings only.
    """

    __magic_name__ : Any = PLBartTokenizer
    __magic_name__ : List[Any] = None
    __magic_name__ : Tuple = False

    def lowercase_ ( self) -> str:
        """Build a tokenizer from the SentencePiece fixture and persist it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        a_ =PLBartTokenizer(UpperCamelCase__ , language_codes="base" , keep_accents=UpperCamelCase__)
        tokenizer.save_pretrained(self.tmpdirname)

    def lowercase_ ( self) -> Optional[int]:
        """Tokenize/encode/decode round-trip checks for the "base" vocab
        (3 language codes appended after the regular vocabulary)."""
        a_ =PLBartTokenizer(UpperCamelCase__ , language_codes="base" , keep_accents=UpperCamelCase__)
        a_ =tokenizer.tokenize("This is a test")
        self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        a_ =tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        a_ =tokenizer.convert_tokens_to_ids(UpperCamelCase__)
        self.assertListEqual(
            UpperCamelCase__ , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        a_ =tokenizer.convert_ids_to_tokens(UpperCamelCase__)
        # Out-of-vocab pieces ("9", "é") decode back as <unk>.
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        a_ =tokenizer.vocab_size
        a_ =[tokenizer.convert_ids_to_tokens(UpperCamelCase__) for x in range(end - 4 , UpperCamelCase__)]
        # The last 4 ids of the "base" vocab are the language codes + <mask>.
        self.assertListEqual(UpperCamelCase__ , ["__java__", "__python__", "__en_XX__", "<mask>"])
        a_ ="java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        a_ =tokenizer(UpperCamelCase__).input_ids
        self.assertEqual(
            tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) , UpperCamelCase__ , )

    def lowercase_ ( self) -> Tuple:
        """Same round-trip checks for the "multi" vocab (7 language codes)."""
        a_ =PLBartTokenizer(UpperCamelCase__ , language_codes="multi" , keep_accents=UpperCamelCase__)
        a_ =tokenizer.tokenize("This is a test")
        self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        a_ =tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        a_ =tokenizer.convert_tokens_to_ids(UpperCamelCase__)
        self.assertListEqual(
            UpperCamelCase__ , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        a_ =tokenizer.convert_ids_to_tokens(UpperCamelCase__)
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        a_ =tokenizer.vocab_size
        a_ =[tokenizer.convert_ids_to_tokens(UpperCamelCase__) for x in range(end - 7 , UpperCamelCase__)]
        # The last 7 ids of the "multi" vocab are all seven language codes.
        self.assertListEqual(
            UpperCamelCase__ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
        a_ ="java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        a_ =tokenizer(UpperCamelCase__).input_ids
        self.assertEqual(
            tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) , UpperCamelCase__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase):
    """Integration tests against the ``uclanlp/plbart-python-en_XX``
    checkpoint: special-token ids, batch encoding, truncation, and
    translation-input building.

    NOTE(review): this block appears machine-garbled — the class attributes
    all share the placeholder name ``__magic_name__`` and the methods the
    placeholder name ``lowercase_`` (later definitions shadow earlier ones),
    while ``UpperCamelCase__`` stands where original names/arguments were.
    Kept byte-for-byte; comments and docstrings only.
    """

    # Checkpoint name, source texts (python), target texts (en_XX), and the
    # expected source token ids (ending with EOS=2 and the PYTHON_CODE id).
    __magic_name__ : Optional[int] = "uclanlp/plbart-python-en_XX"
    __magic_name__ : Dict = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    __magic_name__ : int = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    __magic_name__ : Union[str, Any] = [
        134,
        5_452,
        33_460,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        988,
        20,
        33_456,
        19,
        33_456,
        771,
        39,
        4_258,
        889,
        3_318,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        2_471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowercase_ ( cls) -> Optional[int]:
        """Load the tokenizer once for the whole class."""
        a_ =PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX")
        a_ =1
        return cls

    def lowercase_ ( self) -> int:
        """Language-code tokens map to the expected fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3)

    def lowercase_ ( self) -> Optional[Any]:
        """Batch encoding of the source text yields the expected ids."""
        a_ =self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , UpperCamelCase__)

    def lowercase_ ( self) -> Any:
        """Decoding skips the language code and never emits EOS text."""
        self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids)
        a_ =[EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        a_ =self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__)
        a_ =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__)
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
        self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__)

    def lowercase_ ( self) -> Any:
        """Truncation keeps EOS + language code as the final two ids."""
        a_ =["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
        self.assertIsInstance(src_text[0] , UpperCamelCase__)
        a_ =1_0
        a_ =self.tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__).input_ids[0]
        self.assertEqual(ids[-2] , 2)
        self.assertEqual(ids[-1] , UpperCamelCase__)
        self.assertEqual(len(UpperCamelCase__) , UpperCamelCase__)

    def lowercase_ ( self) -> Optional[Any]:
        """<mask> and __java__ map to ids beyond the base vocab."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [5_0_0_0_4, 5_0_0_0_1])

    def lowercase_ ( self) -> Union[str, Any]:
        """Save/reload round-trip preserves the fairseq token mapping."""
        a_ =tempfile.mkdtemp()
        a_ =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(UpperCamelCase__)
        a_ =PLBartTokenizer.from_pretrained(UpperCamelCase__)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase__)

    @require_torch
    def lowercase_ ( self) -> Optional[int]:
        """Batched pt tensors match the fairseq reference layout."""
        a_ =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors="pt")
        a_ =shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0] , UpperCamelCase__)
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])

    @require_torch
    def lowercase_ ( self) -> int:
        """Full padded batch shape checks plus prefix/suffix token reset."""
        a_ =self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
        a_ =shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
        self.assertEqual((2, 2_6) , batch.input_ids.shape)
        self.assertEqual((2, 2_6) , batch.attention_mask.shape)
        a_ =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , UpperCamelCase__)
        self.assertEqual(2 , batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [])
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])

    def lowercase_ ( self) -> Union[str, Any]:
        """Source and target truncate independently to their max lengths."""
        a_ =self.tokenizer(self.src_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=3 , return_tensors="pt")
        a_ =self.tokenizer(
            text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=1_0 , return_tensors="pt")
        a_ =targets["input_ids"]
        a_ =shift_tokens_right(UpperCamelCase__ , self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1] , 3)
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0)

    @require_torch
    def lowercase_ ( self) -> List[Any]:
        """Translation inputs carry the target language as forced BOS."""
        a_ =self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java")
        self.assertEqual(
            nested_simplify(UpperCamelCase__) , {
                # A, test, EOS, en_XX
                "input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_0_0_0_1,
            } , )
| 721
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(num: int) -> bool:
    """Return True when ``num``'s decimal digits are exactly 1–9, each once.

    NOTE(review): renamed from a garbled placeholder to the name used at
    the call sites below, and the length/set checks now inspect the digit
    string — the original called ``len``/``set`` on the integer argument,
    which raises TypeError.
    """
    digits = str(num)
    return len(digits) == 9 and set(digits) == set("123456789")
def solution():
    """Project Euler 38: largest 1-9 pandigital formed as a concatenated
    product.

    Candidates of the form ``100002 * base`` cover concatenations of a
    4-digit base with (1, 2); the ``1002003 * base`` form covers a 3-digit
    base with (1, 2, 3). Searching downward returns the largest first.

    Returns:
        int | None: the largest qualifying pandigital, or None if absent.

    NOTE(review): renamed from a garbled placeholder to the ``solution``
    name read by the ``__main__`` guard below.
    """
    for base_num in range(9_9_9_9, 4_9_9_9, -1):
        candidate = 1_0_0_0_0_2 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_3_3, 9_9, -1):
        candidate = 1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 41
| 0
|
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Return True when the 0-9 pandigital tuple ``num`` has the Project
    Euler 43 substring-divisibility property (d2d3d4 % 2 == 0, d3d4d5 % 3
    == 0, ..., d8d9d10 % 17 == 0).

    NOTE(review): the loop now enumerates the divisor list — the garbled
    original enumerated ``num`` itself, which both tested the wrong
    divisors and ran the index out of range.
    """
    # d2d3d4 divisible by 2 <=> its last digit d4 is even.
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3 <=> its digit sum is divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 <=> its last digit d6 is 0 or 5.
    if num[5] % 5 != 0:
        return False
    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 1_0) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility
    property (Project Euler 43).

    Args:
        n: number of digits to permute (default 10, i.e. digits 0-9).

    NOTE(review): renamed from a garbled placeholder to the ``solution``
    name read by the ``__main__`` guard below, and restored
    ``map(str, num)`` — the original mapped an undefined name over itself.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
    """Shared test mixin for diffusers UNet sub-blocks: builds dummy inputs,
    checks a corner slice of the forward output, and runs one training step.

    NOTE(review): this block appears machine-garbled — every method shares
    the placeholder name ``lowercase_`` (later definitions shadow earlier
    ones) and several ``def``s repeat the parameter name ``lowerCAmelCase_``
    (a SyntaxError as written). Kept byte-for-byte; comments and docstrings
    only. Verify against the original file before running.
    """

    @property
    def lowercase_ ( self) -> Any:
        """Dummy input built with the default options."""
        return self.get_dummy_input()

    @property
    def lowercase_ ( self) -> List[str]:
        """Expected output shape for the configured ``block_type``."""
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
        """Build the kwargs dict fed to the block under test.

        The boolean flags toggle the optional tensors (timestep embedding,
        residual hidden-state tuple, encoder hidden states, skip sample).
        """
        a_ =4
        a_ =3_2
        a_ =(3_2, 3_2)
        # Seeded generator so the randomized inputs are reproducible.
        a_ =torch.manual_seed(0)
        a_ =torch.device(lowerCAmelCase_)
        a_ =(batch_size, num_channels) + sizes
        a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        a_ ={"hidden_states": hidden_states}
        if include_temb:
            a_ =1_2_8
            a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        if include_res_hidden_states_tuple:
            # Separate seed so residual states differ from hidden_states.
            a_ =torch.manual_seed(1)
            a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
        if include_encoder_hidden_states:
            a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
        if include_skip_sample:
            a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        return dummy_input

    def lowercase_ ( self) -> Optional[int]:
        """Common constructor kwargs plus matching dummy inputs for the block."""
        a_ ={
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        if self.block_type == "up":
            a_ =3_2
        if self.block_type == "mid":
            # Mid blocks keep their channel count; no out_channels argument.
            init_dict.pop("out_channels")
        a_ =self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self , lowerCAmelCase_) -> Dict:
        """Forward pass in eval mode; compare output shape and a corner slice."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        unet_block.to(lowerCAmelCase_)
        unet_block.eval()
        with torch.no_grad():
            a_ =unet_block(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ =output[0]
        self.assertEqual(output.shape , self.output_shape)
        a_ =output[0, -1, -3:, -3:]
        a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
        assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def lowercase_ ( self) -> Optional[Any]:
        """Single training step: forward, MSE loss against noise, backward."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.train()
        a_ =model(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ =output[0]
        a_ =torch.device(lowerCAmelCase_)
        a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
        a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
        loss.backward()
| 41
| 0
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
# Verbose logging for the test run; the root logger is reused below to
# attach a stdout handler.
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ ="\n".join(SCREAMING_SNAKE_CASE_ )
Path(SCREAMING_SNAKE_CASE_ ).open("w" ).writelines(SCREAMING_SNAKE_CASE_ )
# Tiny seq2seq checkpoints so the CLI tests stay fast.
# NOTE(review): the bindings below target ``lowercase`` while later code
# reads T5_TINY / BART_TINY / MBART_TINY / stream_handler — the binding
# names look machine-garbled; verify against the original test module.
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
# Mirror log output to stdout so CaptureStdout can assert on it.
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class UpperCAmelCase ( _UpperCAmelCase):
    """End-to-end tests for the seq2seq ``run_eval.py`` /
    ``run_eval_search.py`` CLI scripts using tiny checkpoints.

    NOTE(review): this block appears machine-garbled — ``_UpperCAmelCase``
    is an undefined base (probably ``TestCasePlus``, imported above), the
    methods share the placeholder name ``lowercase_``, and ``lowercase__``
    is an undefined placeholder standing where original names/arguments
    were. Kept byte-for-byte; comments and docstrings only.
    """

    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        """Run run_eval.py on a one-line source file and assert that the
        score file is produced."""
        a_ =Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        a_ =input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        a_ =[" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(lowercase__ , lowercase__)
        a_ =str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        # T5 checkpoints are exercised as translation, everything else as
        # summarization.
        a_ ="translation_en_to_de" if model == T5_TINY else "summarization"
        a_ =f"""\n    run_eval_search.py\n    {model}\n    {input_file_name}\n    {output_file_name}\n    --score_path {score_path}\n    --task {task}\n    --num_beams 2\n    --length_penalty 2.0\n    """.split()
        with patch.object(lowercase__ , "argv" , lowercase__):
            run_generate()
            assert Path(lowercase__).exists()
            # os.remove(Path(output_file_name))

    def lowercase_ ( self) -> List[str]:
        """Smoke test with the default tiny checkpoint."""
        self.run_eval_tester(lowercase__)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def lowercase_ ( self , lowerCAmelCase_) -> str:
        """Slow variant across the BART/mBART tiny checkpoints."""
        self.run_eval_tester(lowercase__)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
        """Run run_eval_search.py over a beam/length-penalty grid and assert
        the expected report strings appear on stdout."""
        a_ =Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        a_ =input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        a_ ={
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        a_ =Path(self.get_auto_remove_tmp_dir())
        a_ =str(tmp_dir / "scores.json")
        a_ =str(tmp_dir / "val.target")
        _dump_articles(lowercase__ , text["en"])
        _dump_articles(lowercase__ , text["de"])
        a_ ="translation_en_to_de" if model == T5_TINY else "summarization"
        a_ =f"""\n    run_eval_search.py\n    {model}\n    {str(lowercase__)}\n    {str(lowercase__)}\n    --score_path {score_path}\n    --reference_path {reference_path}\n    --task {task}\n    """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(lowercase__ , "argv" , lowercase__):
            with CaptureStdout() as cs:
                run_search()
            # The report must name the swept params, the model, and the best
            # args; translation adds bleu, summarization the ROUGE keys.
            a_ =[" num_beams | length_penalty", model, "Best score args"]
            a_ =["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(lowercase__)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(lowercase__).exists()
            os.remove(Path(lowercase__))
| 701
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_(distance, src):
    """Print each vertex's shortest distance from the source vertex *src*.

    Args:
        distance: list of distances indexed by vertex id.
        src: the source vertex (used only in the header line).
    """
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def UpperCAmelCase_(graph, distance, edge_count):
    """Return True if any edge can still be relaxed, i.e. a negative cycle exists.

    Args:
        graph: list of edge dicts with keys "src", "dst", "weight".
        distance: distances after ``V-1`` Bellman-Ford relaxation passes.
        edge_count: number of edges in *graph*.
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def UpperCAmelCase_(graph, vertex_count, edge_count, src):
    """Bellman-Ford: shortest distances from *src* over a weighted digraph.

    Args:
        graph: list of edge dicts with keys "src", "dst", "weight".
        vertex_count: number of vertices.
        edge_count: number of edges.
        src: source vertex id.

    Returns:
        List of shortest distances indexed by vertex id.

    Raises:
        Exception: if a negative-weight cycle is reachable.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    # V-1 relaxation passes are sufficient for any cycle-free shortest path.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    # Final pass: if any edge still relaxes, a negative cycle exists.
    # (Inlined — the original called ``check_negative_cycle``, which is not
    # defined under that name in this file.)
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read the graph interactively: V vertices, E edges, each edge entered as
    # "src dst weight" on one line.
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    # NOTE(review): the Bellman-Ford and print helpers above are both defined
    # under the name ``UpperCAmelCase_`` in this file, so these call names do
    # not resolve as written — confirm the intended function names.
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Default batch sizes for the MRPC example; the original assigned both values
# to the same name, so the first was dead code.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def UpperCAmelCase_(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build GLUE/MRPC train and eval dataloaders tokenized for *model_name*.

    Args:
        accelerator: the ``Accelerator`` (used to detect TPU padding strategy).
        batch_size: per-device batch size for both dataloaders.
        model_name: tokenizer checkpoint name.

    Returns:
        (train_dataloader, eval_dataloader) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def UpperCAmelCase_(accelerator, model, eval_dataloader, metric):
    """Evaluate *model* on *eval_dataloader* and return its accuracy.

    De-duplicates the last gathered batch in distributed runs so every sample
    is counted exactly once.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def UpperCAmelCase_(config, args):
    """Train BERT on MRPC with Accelerate, saving a checkpoint and a JSON state
    file per epoch, and verifying resumed state when ``--resume_from_checkpoint``
    is given.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace (model path, output dir, resume options).
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    # NOTE(review): the dataloader builder above is defined as
    # ``UpperCAmelCase_`` in this file; confirm the intended name.
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: a DeepSpeed-configured optimizer is replaced by a dummy.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Checkpoint dirs are named "epoch_<N>"; recover N from the path.
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def UpperCAmelCase_():
    """Parse CLI arguments and launch training with a default hyper-parameter config."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    # NOTE(review): the trainer above is defined as ``UpperCAmelCase_`` in this
    # file; confirm the intended name.
    training_function(config, args)


if __name__ == "__main__":
    UpperCAmelCase_()
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
# Load a DreamBooth-style fine-tuned Stable Diffusion checkpoint and render one
# image. The intermediate names must be distinct — the original assigned every
# value to the same name, leaving ``model_id``/``prompt``/``image`` undefined.
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 41
| 0
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """``is_small_dataset`` is True iff a known dataset size is below IN_MEMORY_MAX_SIZE.

    Parameter names must match the ``parametrize`` ids and the ``monkeypatch``
    fixture for pytest to inject them; the function also needs the ``test_``
    prefix to be collected.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset of unknown size (None) or an unset limit (0) is never "small".
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public symbols. The dict must
# be named ``_import_structure`` because ``_LazyModule`` below receives it, and
# the conditional branches must add entries to it (the original assigned each
# list to a throwaway name instead).
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import json
import sys
def UpperCAmelCase_(input_json_file, output_md_file):
    """Render benchmark results from *input_json_file* as a Markdown table in *output_md_file*.

    The JSON maps benchmark name -> metric name -> {"new": x, "old": y, "diff": z}
    ("old" and "diff" optional).
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        # One header/separator/value row per benchmark, one column per metric.
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    UpperCAmelCase_(input_json_file, output_md_file)
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# These names are read by the tokenizer class below (vocab file map, size map),
# so they must be bound as real constants, not a single reused name.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}
def UpperCAmelCase_(vocab_file, emoji_file):
    """Load the subword vocabulary and emoji table.

    Args:
        vocab_file: text file, one entry per line; a line may hold several
            comma-separated surface forms that share one token id.
        emoji_file: JSON file with the emoji mapping.

    Returns:
        (vocab, raw_vocab, ids_to_tokens, emoji) where ``vocab`` maps every
        surface form to its id, ``raw_vocab`` maps the raw line to its id, and
        ``ids_to_tokens`` maps id -> list of surface forms.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A bare "," line (or a line without commas) is a single token; otherwise
    # the line is split into its comma-separated variants.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase(PreTrainedTokenizer):
    """GPT-NeoX-Japanese tokenizer: delegates subword segmentation to a
    ``SubWordJapaneseTokenizer`` built from a comma-separated vocab file and an
    emoji table.

    The class attributes below are read by the ``PreTrainedTokenizer`` base
    class, so they must carry these exact names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        # Module-level loader (defined as ``UpperCAmelCase_`` earlier in this file).
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = UpperCAmelCase_(vocab_file, emoji_file)
        # NOTE(review): the subword class below is defined under a different
        # name in this file — confirm it is exported as SubWordJapaneseTokenizer.
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        """Number of raw vocabulary lines (comma-joined variants count once)."""
        return len(self.raw_vocab)

    def get_vocab(self):
        """Return the raw vocab merged with any added tokens."""
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split *text* into subword tokens (optionally cleaning it first)."""
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a token id back to its surface string."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Concatenate tokens into a single stripped string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation):
        """Encode a Conversation's turns, each terminated by EOS, truncated on the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab and emoji files into *save_directory*; return their paths."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class UpperCAmelCase(object):
    """Subword tokenizer for GPT-NeoX-Japanese.

    Greedily matches vocabulary entries (preferring the smallest token id among
    candidates), replaces whitespace/emoji/symbol characters with special
    tokens, and falls back to byte tokens ``<|byteN|>`` for unknown characters.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # Longest vocab entry bounds the lookahead window in ``tokenize``.
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        # Cleaning patterns: URL, e-mail, phone number, two date formats, price.
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs/e-mails/phones/dates/prices and box-drawing chars with tags."""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        """Tokenize *text* into a list of surface-form token strings."""
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width (ideographic) space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for single characters whose UTF-8 encoding is two bytes in
            # the listed symbol ranges.
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            # True for single characters in the U+2000..U+2BFF UTF-8 range.
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Special tokens start with "<" and may be long; plain text is
            # matched with a window of at most 3 characters.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Map a token id back to text, decoding byte tokens and special tags."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 41
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for CTRL's BPE tokenizer.

    ``setUp`` / ``get_tokenizer`` / ``get_input_output_texts`` are hook names
    called by unittest and ``TokenizerTesterMixin`` and must keep these names;
    test methods need the ``test_`` prefix to be collected.
    """

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map model suffix -> Keras reference class used as the conversion source.
# Each variant must map to its own class; the original mapped every key to the
# same attribute.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyper-parameters; read below as ``CONFIG_MAP``.
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def UpperCAmelCase_(model_name):
    """Build an ``EfficientNetConfig`` for *model_name* ("b0".."b7") with
    ImageNet-1k label mappings.

    The values must be assigned onto ``config`` attributes (the original bound
    them to throwaway locals, leaving the config untouched).
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def UpperCAmelCase_():
    """Download and return the standard COCO two-cats test image.

    The zero-argument original referenced an undefined parameter name; the URL
    and streaming flag are bound locally here.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def UpperCAmelCase_(model_name):
    """Build the ``EfficientNetImageProcessor`` matching *model_name*'s input size."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """Map TF (Keras) EfficientNet parameter names to HF parameter names.

    Only TF names that actually occur in ``original_param_names`` get a backbone
    mapping; the classification-head keys are always added.
    """
    # Collect per-block suffixes ("1a", "2b", ...) in stable sorted order,
    # e.g. "block1a_expand_conv/kernel:0" -> "1a".
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    # TF block suffix -> HF block index as a string ("1a" -> "0", "1b" -> "1", ...).
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight"""))
        rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight"""))
        rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias"""))
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"""))
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var"""))
        rename_keys.append(
            (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"""))
        rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"""))
        rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"""))
        rename_keys.append(
            (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"""))
        rename_keys.append(
            (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"""))
        rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"""))
        rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"""))
        rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight"""))
        rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias"""))
        rename_keys.append(
            (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight"""))
        rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight"""))
        rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias"""))
        rename_keys.append(
            (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean"""))
        rename_keys.append(
            (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var"""))

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            # The backbone lives under the "efficientnet." prefix in the HF model.
            key_mapping[item[0]] = "efficientnet." + item[1]
    # The classification head sits outside the backbone prefix.
    # NOTE(review): the TF head layer name ("predictions") was lost in the mangling — confirm.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF weights into the HF state dict in place, transposing layouts as needed.

    hf_params: HF model ``state_dict``; tf_params: TF param name -> numpy array;
    key_mapping: TF name -> HF name (as produced by ``rename_keys``).
    """
    for key, value in tf_params.items():
        if "normalization" in key:
            # Keras Normalization layers have no HF counterpart; skip them.
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernels: TF HWIO -> torch OIHW
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # Depthwise kernels use a different axis order.
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # Dense kernels: (in, out) -> (out, in)
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert a Keras EfficientNet checkpoint to HF format, verify the logits, and optionally save/push.

    NOTE(review): local variable names were lost in the mangled source and are
    reconstructed here from the upstream conversion script — review before relying on them.
    """
    # Instantiate the original TF/Keras model with its pretrained ImageNet weights.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1_0_0_0,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""")
        model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Below this window size the ternary searches fall back to a linear scan.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of ``array[left:right]``; return the index of ``target`` or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted ``array``; return an index of ``target`` or -1.

    Falls back to ``lin_search`` once the window is narrower than ``precision``.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # target lies strictly between the two probe points
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of ``array`` between ``left`` and ``right``; return an index or -1.

    Falls back to ``lin_search`` once the window is narrower than ``precision``.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            # target lies strictly between the two probe points
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    # Both searches require sorted input.
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(F"""Iterative search: {target} found at positions: {result1}""")
        print(F"""Recursive search: {target} found at positions: {result2}""")
    else:
        print("Not found")
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
import os
def solution():
    """Return the first ten digits of the sum of the integers listed in ``num.txt``.

    The file is expected to sit next to this script, one integer per line
    (Project Euler problem 13).
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:1_0]


if __name__ == "__main__":
    print(solution())
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import machinery: the structure below tells _LazyModule which symbols
# live in which submodule so heavy deps (torch) are only imported on demand.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    """Builds tiny YOLOS configs and inputs for the unit tests below.

    NOTE(review): class/method/argument names reconstructed from the intact call
    sites (``self.model_tester.*``) — confirm against the upstream test file.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=[3_0, 3_0],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.0_2,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=1_0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for YOLOS.

    NOTE(review): base-mixin, flag, and ``test_*`` method names were lost in the
    mangled source and are reconstructed from the common HF test conventions —
    confirm against the upstream test file.
    """

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Object detection expects per-image dicts of class_labels/boxes instead of a flat tensor.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len]
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used across the vision integration tests."""
    im = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return im
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the released ``hustvl/yolos-small`` checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 1_0_0, 9_2))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [7_5, 7_5, 1_7, 6_3, 1_7]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 708
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers starting from 1: 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_0_0_0) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits.

    >>> solution(3)
    12
    """
    answer = 1
    gen = fibonacci_generator()
    # The generator starts at F(2)=1, so the final index is answer + 1.
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 41
| 0
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder object raising a helpful error when torch/transformers/onnx are missing.

    NOTE(review): the original concrete pipeline class name was lost in the
    mangling — restore it from the real module. ``DummyObject`` is grounded in
    the ``from ..utils import DummyObject, requires_backends`` import above.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder object raising a helpful error when torch/transformers/onnx are missing.

    NOTE(review): original concrete class name lost — restore from the real module.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder object raising a helpful error when torch/transformers/onnx are missing.

    NOTE(review): original concrete class name lost — restore from the real module.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder object raising a helpful error when torch/transformers/onnx are missing.

    NOTE(review): original concrete class name lost — restore from the real module.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder object raising a helpful error when torch/transformers/onnx are missing.

    NOTE(review): original concrete class name lost — restore from the real module.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder object raising a helpful error when torch/transformers/onnx are missing.

    NOTE(review): original concrete class name lost — restore from the real module.
    """

    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the archive-map constant name was lost in the mangling; this is the
# conventional name for the switch_transformers model type — confirm.
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    """Configuration for Switch Transformers (sparse Mixture-of-Experts T5 variant).

    NOTE(review): parameter and attribute names reconstructed from the body's
    assignment order and upstream conventions — confirm against the original.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=3_2_1_2_8,
        d_model=7_6_8,
        d_kv=6_4,
        d_ff=2_0_4_8,
        expert_capacity=6_4,
        num_layers=1_2,
        num_sparse_encoder_layers=3,
        num_decoder_layers=1_2,
        num_sparse_decoder_layers=3,
        num_heads=1_2,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.0_1,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=3_2,
        relative_attention_max_distance=1_2_8,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.0_0_1,
        router_aux_loss_coef=0.0_0_1,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # Parse "gated-gelu" style activation specs into (is_gated, act_fn).
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
| 41
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds tiny RegNet configs and inputs for the Flax model tests.

    NOTE(review): names reconstructed from the mangled source and the matching
    PyTorch tester — confirm against the upstream test file.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=3_2,
        num_channels=3,
        embeddings_size=1_0,
        hidden_sizes=[1_0, 2_0, 3_0, 4_0],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # presumably one stage per entry of hidden_sizes — confirm vs original
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Standard Flax model-test suite for RegNet.

    NOTE(review): restored from obfuscated code (base class ``snake_case__``,
    methods all named ``lowercase_``, undefined locals).  Attribute names for
    the two boolean flags follow the upstream Flax RegNet test — confirm
    against transformers' ``FlaxModelTesterMixin`` expectations.
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        # RegNet is vision-only, so the config tester skips text-modality checks.
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # No common text-model properties to check for RegNet.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            # One hidden state per stage, plus the embedding output.
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the COCO fixture image used by the slow integration test below.

    NOTE(review): the obfuscated original bound the image to one name and
    returned another (NameError); it is now returned directly.  Renamed from
    ``UpperCAmelCase_`` because the integration test calls ``prepare_img()``.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: checks logits of pretrained facebook/regnet-y-040.

    NOTE(review): restored from obfuscated code in which ``model``,
    ``image_processor``, ``inputs`` and ``outputs`` were never bound.
    """

    @cached_property
    def default_image_processor(self):
        # Only build the processor when the vision extras are installed.
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def get_results(output_dir):
    """Load and return the ``all_results.json`` written by a finished training run.

    Args:
        output_dir: directory the training script wrote its results into.

    Returns:
        The parsed JSON dict.

    Raises:
        ValueError: if ``all_results.json`` does not exist in ``output_dir``.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
# Mirror all log records to stdout so the test runner captures them.
# NOTE(review): the obfuscated original assigned the handler to `lowercase`
# but registered the undefined name `stream_handler` (NameError) — fixed.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    """End-to-end TPU smoke tests driven through ``xla_spawn``.

    NOTE(review): restored from obfuscated code in which ``patch.object`` was
    called on undefined placeholder names; the scripts are launched by
    patching ``sys.argv`` and invoking ``xla_spawn.main()``.
    """

    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.7_5)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 5_0_0)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    """Return the ``(features, target)`` pair from a scikit-learn Bunch/dict.

    Renamed from the obfuscated ``UpperCAmelCase_`` because ``main`` calls
    ``data_handling``.
    """
    return (data["data"], data["target"])
def xgboost(features, target, test_features):
    """Fit an XGBoost regressor on ``(features, target)`` and predict ``test_features``.

    NOTE(review): the obfuscated signature repeated one parameter name three
    times (a SyntaxError) and referenced the undefined ``__snake_case``;
    distinct parameter names are restored (callers pass positionally).

    Returns:
        Predictions reshaped to a column vector of shape ``(n_samples, 1)``.
    """
    xgb = XGBRegressor(verbosity=0, random_state=4_2)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main():
    """Fetch California housing data, train XGBoost, and print error metrics.

    Renamed from the obfuscated ``UpperCAmelCase_`` because the script guard
    calls ``main()``; the undefined ``__snake_case`` placeholders are replaced
    by real locals.
    """
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test, predictions)}""")
    print(f"""Mean Square Error : {mean_squared_error(y_test, predictions)}""")
# Script entry point: run the module doctests, then the housing-price demo.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
    # NOTE(review): `main` is not defined in the obfuscated module (the
    # function above is named `UpperCAmelCase_`), so this call raises
    # NameError as written — confirm intended target.
    main()
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map of canonical ALBERT checkpoint names to their hosted config.json URLs.
# NOTE(review): obfuscated name `lowercase` — upstream calls this
# ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP; confirm before renaming.
lowercase = {
    '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
    '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
    '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
    '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
    '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
    '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
    '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
    '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
    """Configuration class for ALBERT models (``model_type`` "albert").

    NOTE(review): machine-obfuscated — the ``__init__`` signature repeats the
    parameter name ``lowerCAmelCase_`` (a SyntaxError in real Python) and the
    body's right-hand sides (``vocab_size`` etc.) are the intended parameter
    names; each ``a_ =`` originally stored the same-named attribute on
    ``self``.  The base class placeholder ``__a`` presumably stands for
    ``PretrainedConfig`` (imported above) — confirm before fixing.
    """
    # Model-type key used by the Auto* factories.
    __magic_name__ : int = "albert"
    def __init__( self , lowerCAmelCase_=3_0_0_0_0 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_6_3_8_4 , lowerCAmelCase_=1 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_="absolute" , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , **lowerCAmelCase_ , ) -> Any:
        """Store the ALBERT hyper-parameters (vocab/embedding/hidden sizes,
        layer/group/head counts, dropout probabilities, etc.) and forward the
        special-token ids to the base config."""
        super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
        # NOTE(review): the names on the right are the intended parameters;
        # they are unbound in this obfuscated form (NameError at runtime).
        a_ =vocab_size
        a_ =embedding_size
        a_ =hidden_size
        a_ =num_hidden_layers
        a_ =num_hidden_groups
        a_ =num_attention_heads
        a_ =inner_group_num
        a_ =hidden_act
        a_ =intermediate_size
        a_ =hidden_dropout_prob
        a_ =attention_probs_dropout_prob
        a_ =max_position_embeddings
        a_ =type_vocab_size
        a_ =initializer_range
        a_ =layer_norm_eps
        a_ =classifier_dropout_prob
        a_ =position_embedding_type
class UpperCAmelCase ( __a):
    """ONNX export configuration for ALBERT: declares the model's dynamic
    input axes per task.

    NOTE(review): class/base/property names are obfuscator placeholders
    (base ``__a`` presumably ``OnnxConfig``); only the NameError below is
    fixed here to keep the visible interface unchanged.
    """
    @property
    def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input-name -> dynamic-axis mapping for this task."""
        # Multiple-choice inputs carry an extra "choice" dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 41
| 0
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Download a class-info JSON from the Hub and build OneFormer metadata.

    NOTE(review): the obfuscated signature duplicated one parameter name (a
    SyntaxError) and referenced the undefined ``lowerCamelCase_``; distinct
    names are restored.  The tester class below calls ``prepare_metadata``.

    Returns:
        Dict mapping each class id to its name, plus ``"thing_ids"`` (ids
        with ``isthing`` set) and ``"class_names"`` lists.
    """
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class UpperCAmelCase ( unittest.TestCase):
    """Holds hyper-parameters and helpers for the OneFormer image-processor tests.

    NOTE(review): machine-obfuscated — the ``__init__`` signature repeats the
    parameter name ``lowerCAmelCase_`` (a SyntaxError in real Python), each
    ``a_ =`` originally bound the attribute named on its right-hand side, and
    ``lowerCamelCase_`` stands for real arguments.  Comments describe the
    evident intent; confirm against the upstream transformers test file.
    """
    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=3_0 , lowerCAmelCase_=4_0_0 , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=[0.5, 0.5, 0.5] , lowerCAmelCase_=[0.5, 0.5, 0.5] , lowerCAmelCase_=1_0 , lowerCAmelCase_=False , lowerCAmelCase_=2_5_5 , lowerCAmelCase_="shi-labs/oneformer_demo" , lowerCAmelCase_="ade20k_panoptic.json" , lowerCAmelCase_=1_0 , ) -> Union[str, Any]:
        """Store test hyper-parameters: batch size, resolution bounds,
        normalisation stats, Hub metadata source, and post-processing dims."""
        a_ =parent
        a_ =batch_size
        a_ =num_channels
        a_ =min_resolution
        a_ =max_resolution
        a_ =do_resize
        a_ ={"""shortest_edge""": 3_2, """longest_edge""": 1_3_3_3} if size is None else size
        a_ =do_normalize
        a_ =image_mean
        a_ =image_std
        a_ =class_info_file
        # presumably prepare_metadata(class_info_file, repo_path) — confirm
        a_ =prepare_metadata(lowerCamelCase_ , lowerCamelCase_)
        a_ =num_text
        a_ =repo_path
        # for the post_process_functions
        a_ =2
        a_ =1_0
        a_ =1_0
        a_ =3
        a_ =4
        a_ =num_labels
        a_ =do_reduce_labels
        a_ =ignore_index
    def lowercase_ ( self) -> int:
        """Return the kwargs dict used to build OneFormerImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Any:
        """Compute the (height, width) the processor is expected to resize to,
        preserving aspect ratio against the shortest-edge setting; in batched
        mode take the per-image maxima."""
        if not batched:
            a_ =image_inputs[0]
            if isinstance(lowerCamelCase_ , Image.Image):
                a_ =image.size
            else:
                a_ =image.shape[1], image.shape[2]
            if w < h:
                a_ =int(self.size["shortest_edge"] * h / w)
                a_ =self.size["""shortest_edge"""]
            elif w > h:
                a_ =self.size["""shortest_edge"""]
                a_ =int(self.size["shortest_edge"] * w / h)
            else:
                a_ =self.size["""shortest_edge"""]
                a_ =self.size["""shortest_edge"""]
        else:
            a_ =[]
            for image in image_inputs:
                a_ =self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            a_ =max(lowerCamelCase_ , key=lambda lowerCAmelCase_: item[0])[0]
            a_ =max(lowerCamelCase_ , key=lambda lowerCAmelCase_: item[1])[1]
        return expected_height, expected_width
    def lowercase_ ( self) -> List[Any]:
        """Build a random OneFormer output with per-query class logits and masks."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)) , )
@require_torch
@require_vision
class UpperCAmelCase ( lowercase_ , unittest.TestCase):
    """Unit tests for OneFormerImageProcessor: attribute presence, PIL/numpy/
    torch batching, annotation encoding, RLE conversion and post-processing.

    NOTE(review): machine-obfuscated — the mixin base name ``lowercase_``,
    the ``a_ =`` assignments and the ``lowerCamelCase_`` call arguments are
    placeholder names left by the obfuscator and do not resolve at runtime.
    """
    # Processor class under test (None when torch/vision extras are missing).
    __magic_name__ : Tuple = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    __magic_name__ : Dict = image_processing_class
    def lowercase_ ( self) -> str:
        """Create the shared tester fixture (setUp)."""
        a_ =OneFormerImageProcessorTester(self)
    @property
    def lowercase_ ( self) -> int:
        """Kwargs used to instantiate the processor under test."""
        return self.image_processing_tester.prepare_image_processor_dict()
    def lowercase_ ( self) -> Optional[Any]:
        """The processor exposes every expected configuration attribute."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(lowerCamelCase_ , "image_mean"))
        self.assertTrue(hasattr(lowerCamelCase_ , "image_std"))
        self.assertTrue(hasattr(lowerCamelCase_ , "do_normalize"))
        self.assertTrue(hasattr(lowerCamelCase_ , "do_resize"))
        self.assertTrue(hasattr(lowerCamelCase_ , "size"))
        self.assertTrue(hasattr(lowerCamelCase_ , "ignore_index"))
        self.assertTrue(hasattr(lowerCamelCase_ , "class_info_file"))
        self.assertTrue(hasattr(lowerCamelCase_ , "num_text"))
        self.assertTrue(hasattr(lowerCamelCase_ , "repo_path"))
        self.assertTrue(hasattr(lowerCamelCase_ , "metadata"))
        self.assertTrue(hasattr(lowerCamelCase_ , "do_reduce_labels"))
    def lowercase_ ( self) -> Dict:
        """Intentionally skipped common test."""
        pass
    def lowercase_ ( self) -> str:
        """PIL inputs: single image and batch resize to the expected shapes."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        a_ =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_)
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , Image.Image)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt").pixel_values
        a_ =self.image_processing_tester.get_expected_values(lowerCamelCase_)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a_ =self.image_processing_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_)
        a_ =image_processor(
            lowerCamelCase_ , ["semantic"] * len(lowerCamelCase_) , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowercase_ ( self) -> int:
        """Numpy inputs: single image and batch resize to the expected shapes."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        a_ =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_)
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , np.ndarray)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt").pixel_values
        a_ =self.image_processing_tester.get_expected_values(lowerCamelCase_)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a_ =self.image_processing_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_)
        a_ =image_processor(
            lowerCamelCase_ , ["semantic"] * len(lowerCamelCase_) , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowercase_ ( self) -> int:
        """Torch inputs: single image and batch resize to the expected shapes."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        a_ =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_)
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase_ , torch.Tensor)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt").pixel_values
        a_ =self.image_processing_tester.get_expected_values(lowerCamelCase_)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a_ =self.image_processing_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_)
        a_ =image_processor(
            lowerCamelCase_ , ["semantic"] * len(lowerCamelCase_) , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowercase_ ( self , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_="np") -> Union[str, Any]:
        """Helper: build processor inputs, optionally with random segmentation
        maps (as numpy or PIL) and an instance-id -> semantic-id mapping."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        a_ =self.image_processing_tester.num_labels
        a_ =None
        a_ =None
        a_ =prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase_)
        if with_segmentation_maps:
            a_ =num_labels
            if is_instance_map:
                a_ =list(range(lowerCamelCase_)) * 2
                a_ =dict(enumerate(lowerCamelCase_))
            a_ =[
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0])).astype(np.uinta) for img in image_inputs
            ]
            if segmentation_type == "pil":
                a_ =[Image.fromarray(lowerCamelCase_) for annotation in annotations]
        a_ =image_processor(
            lowerCamelCase_ , ["semantic"] * len(lowerCamelCase_) , lowerCamelCase_ , return_tensors="pt" , instance_id_to_semantic_id=lowerCamelCase_ , pad_and_return_pixel_mask=lowerCamelCase_ , )
        return inputs
    def lowercase_ ( self) -> str:
        """Intentionally skipped common test."""
        pass
    def lowercase_ ( self) -> Union[str, Any]:
        """Annotation encoding: mask/class labels line up per batch item and
        padding matches pixel_values, for numpy and PIL maps, with and
        without an instance map."""
        def common(lowerCAmelCase_=False , lowerCAmelCase_=None):
            a_ =self.comm_get_image_processor_inputs(
                with_segmentation_maps=lowerCamelCase_ , is_instance_map=lowerCamelCase_ , segmentation_type=lowerCamelCase_)
            a_ =inputs["""mask_labels"""]
            a_ =inputs["""class_labels"""]
            a_ =inputs["""pixel_values"""]
            a_ =inputs["""text_inputs"""]
            # check the batch_size
            for mask_label, class_label, text_input in zip(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:])
                self.assertEqual(len(lowerCamelCase_) , self.image_processing_tester.num_text)
        common()
        common(is_instance_map=lowerCamelCase_)
        common(is_instance_map=lowerCamelCase_ , segmentation_type="pil")
        common(is_instance_map=lowerCamelCase_ , segmentation_type="pil")
    def lowercase_ ( self) -> Tuple:
        """binary_mask_to_rle on a mask with a single set pixel at (1, 1)."""
        a_ =np.zeros((2_0, 5_0))
        a_ =1
        a_ =1
        a_ =1
        a_ =binary_mask_to_rle(lowerCamelCase_)
        self.assertEqual(len(lowerCamelCase_) , 4)
        self.assertEqual(rle[0] , 2_1)
        self.assertEqual(rle[1] , 4_5)
    def lowercase_ ( self) -> Any:
        """post_process_semantic_segmentation: default and target_sizes shapes."""
        a_ =self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        a_ =self.image_processing_tester.get_fake_oneformer_outputs()
        a_ =fature_extractor.post_process_semantic_segmentation(lowerCamelCase_)
        self.assertEqual(len(lowerCamelCase_) , self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        a_ =[(1, 4) for i in range(self.image_processing_tester.batch_size)]
        a_ =fature_extractor.post_process_semantic_segmentation(lowerCamelCase_ , target_sizes=lowerCamelCase_)
        self.assertEqual(segmentation[0].shape , target_sizes[0])
    def lowercase_ ( self) -> List[str]:
        """post_process_instance_segmentation returns per-image dicts with
        segmentation maps and segments_info."""
        a_ =self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        a_ =self.image_processing_tester.get_fake_oneformer_outputs()
        a_ =image_processor.post_process_instance_segmentation(lowerCamelCase_ , threshold=0)
        self.assertTrue(len(lowerCamelCase_) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]) , lowerCamelCase_)
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width))
    def lowercase_ ( self) -> Dict:
        """post_process_panoptic_segmentation returns per-image dicts with
        segmentation maps and segments_info."""
        a_ =self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        a_ =self.image_processing_tester.get_fake_oneformer_outputs()
        a_ =image_processor.post_process_panoptic_segmentation(lowerCamelCase_ , threshold=0)
        self.assertTrue(len(lowerCamelCase_) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]) , lowerCamelCase_)
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width))
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums=None):
    """Return the maximum sum over all (not necessarily contiguous) subsequences.

    Uses the running recurrence ``ans = max(ans, ans + num, num)``: either
    keep the best so far, add the current element to it, or restart from the
    current element.  For an all-negative input the answer is the largest
    single element.

    Args:
        nums: non-empty sequence of numbers.

    Raises:
        ValueError: if ``nums`` is None or empty.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
# Script entry point: run doctests, then try the algorithm on user input.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Try on a sample input from the user
    lowercase = int(input('''Enter number of elements : ''').strip())
    # NOTE(review): `n` and `array` below are unbound — both inputs were
    # obfuscated to the same name `lowercase`; confirm intended variables.
    lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 41
| 0
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCAmelCase ( __lowerCamelCase):
    """Decoding modes for the MGP-STR processor: character, BPE, or wordpiece.

    NOTE(review): machine-obfuscated — all three members were renamed to
    ``__magic_name__`` so only the last assignment survives; upstream they
    are CHARACTER = "char", BPE = "bpe", WORDPIECE = "wp" (the tuple below
    references those member names).  Base ``__lowerCamelCase`` is presumably
    ``ExplicitEnum`` (imported above) — confirm.
    """
    __magic_name__ : Dict = 'char'
    __magic_name__ : Union[str, Any] = 'bpe'
    __magic_name__ : str = 'wp'
# Tuple of all supported decode modes (used to validate the `format` argument).
lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase ( __lowerCamelCase):
    """Processor for MGP-STR scene-text recognition: wraps a ViT image
    processor plus three tokenizers (char, GPT-2 BPE, BERT wordpiece) and
    fuses their decoded predictions.

    NOTE(review): machine-obfuscated — base ``__lowerCamelCase`` is
    presumably ``ProcessorMixin``; ``UpperCamelCase_`` placeholders stand
    for real local names/arguments and do not resolve at runtime.
    """
    # ProcessorMixin attribute wiring: attribute names and component classes.
    __magic_name__ : Optional[int] = ['image_processor', 'char_tokenizer']
    __magic_name__ : int = 'ViTImageProcessor'
    __magic_name__ : Any = 'MgpstrTokenizer'
    def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_) -> Tuple:
        """Accept an image processor and char tokenizer; also builds the GPT-2
        and BERT tokenizers used for BPE/wordpiece decoding.  Supports the
        deprecated ``feature_extractor`` kwarg as an alias."""
        a_ =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , UpperCamelCase_ , )
            a_ =kwargs.pop("feature_extractor")
        a_ =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        a_ =tokenizer
        a_ =AutoTokenizer.from_pretrained("gpt2")
        a_ =AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(UpperCamelCase_ , UpperCamelCase_)
    def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_) -> str:
        """Process images and/or text; when both are given, merge the text
        encodings' input_ids into the image inputs (labels)."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            a_ =self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_)
        if text is not None:
            a_ =self.char_tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            a_ =encodings["input_ids"]
            return inputs
    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        """Decode the (char, bpe, wp) logit triple and, per sample, keep the
        head with the highest confidence; returns a dict with `generated_text`,
        `scores` and the three per-head decodings."""
        a_ , a_ , a_ =sequences
        a_ =char_preds.size(0)
        a_ , a_ =self._decode_helper(UpperCamelCase_ , "char")
        a_ , a_ =self._decode_helper(UpperCamelCase_ , "bpe")
        a_ , a_ =self._decode_helper(UpperCamelCase_ , "wp")
        a_ =[]
        a_ =[]
        for i in range(UpperCamelCase_):
            a_ =[char_scores[i], bpe_scores[i], wp_scores[i]]
            a_ =[char_strs[i], bpe_strs[i], wp_strs[i]]
            a_ =scores.index(max(UpperCamelCase_))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        a_ ={}
        a_ =final_strs
        a_ =final_scores
        a_ =char_strs
        a_ =bpe_strs
        a_ =wp_strs
        return out
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
        """Greedy-decode one head's logits: pick top-1 tokens, decode to text,
        truncate at the head's EOS token, and compute a cumulative-probability
        confidence score per sample."""
        if format == DecodeType.CHARACTER:
            a_ =self.char_decode
            a_ =1
            a_ ="[s]"
        elif format == DecodeType.BPE:
            a_ =self.bpe_decode
            a_ =2
            a_ ="#"
        elif format == DecodeType.WORDPIECE:
            a_ =self.wp_decode
            a_ =1_0_2
            a_ ="[SEP]"
        else:
            raise ValueError(f"""Format {format} is not supported.""")
        a_ , a_ =[], []
        a_ =pred_logits.size(0)
        a_ =pred_logits.size(1)
        a_ , a_ =pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase_ , sorted=UpperCamelCase_)
        a_ =preds_index.view(-1 , UpperCamelCase_)[:, 1:]
        a_ =decoder(UpperCamelCase_)
        a_ , a_ =torch.nn.functional.softmax(UpperCamelCase_ , dim=2).max(dim=2)
        a_ =preds_max_prob[:, 1:]
        for index in range(UpperCamelCase_):
            a_ =preds_str[index].find(UpperCamelCase_)
            a_ =preds_str[index][:pred_eos]
            a_ =preds_index[index].cpu().tolist()
            a_ =pred_index.index(UpperCamelCase_) if eos_token in pred_index else -1
            a_ =preds_max_prob[index][: pred_eos_index + 1]
            a_ =pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(UpperCamelCase_)
            conf_scores.append(UpperCamelCase_)
        return dec_strs, conf_scores
    def lowercase_ ( self , lowerCAmelCase_) -> Any:
        """Decode char-tokenizer ids and strip spaces between characters."""
        a_ =[seq.replace(" " , "") for seq in self.char_tokenizer.batch_decode(UpperCamelCase_)]
        return decode_strs
    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        """Decode ids with the GPT-2 BPE tokenizer."""
        return self.bpe_tokenizer.batch_decode(UpperCamelCase_)
    def lowercase_ ( self , lowerCAmelCase_) -> Any:
        """Decode wordpiece-tokenizer ids and strip spaces."""
        a_ =[seq.replace(" " , "") for seq in self.wp_tokenizer.batch_decode(UpperCamelCase_)]
        return decode_strs
| 713
|
'''simple docstring'''
import os
from math import logaa
def solution(data_file="base_exp.txt"):
    """Project Euler 99: find the line with the greatest base**exponent.

    Each line of ``data_file`` holds "base,exponent"; comparing
    ``exponent * log10(base)`` avoids evaluating the huge powers.

    Returns:
        The 1-based line number holding the greatest value (0 for an empty file).
    """
    # NOTE(review): the module-level `from math import logaa` is a broken
    # obfuscated import (math has no `logaa`); import the real log10 here.
    from math import log10

    largest = 0.0
    result = 0
    # Resolve the data file relative to this script, as upstream does.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)), data_file)
    with open(path) as f:
        for i, line in enumerate(f):
            base, exponent = map(int, line.split(","))
            value = exponent * log10(base)
            if value > largest:
                largest = value
                result = i + 1
    return result
# Script entry point: print the Project Euler 99 answer.
if __name__ == "__main__":
    print(solution())
| 41
| 0
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()

# JSON indentation used when writing vocab/config files.
# NOTE(review): obfuscated name `lowercase` is reused for three distinct
# constants below (upstream: json_indent, best_score_hparams, org_names) —
# each rebinding clobbers the previous one; confirm before renaming.
lowercase = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowercase = {
    # fairseq:
    '''wmt19-ru-en''': {'''length_penalty''': 1.1},
    '''wmt19-en-ru''': {'''length_penalty''': 1.15},
    '''wmt19-en-de''': {'''length_penalty''': 1.0},
    '''wmt19-de-en''': {'''length_penalty''': 1.1},
    # allenai:
    '''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
    '''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
    '''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
    '''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
    '''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}

# this remaps the different models to their organization names
lowercase = {}
# NOTE(review): each loop body below was presumably `org_names[m] = "..."`;
# as written it rebinds the bare name `lowercase` instead of filling the dict.
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    lowercase = '''facebook'''
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    lowercase = '''allenai'''
def UpperCAmelCase_ ( lowercase__ ):
    """Rewrite a fairseq vocab dict into the format FSMT's tokenizer expects.

    fairseq marks BPE *continuation* tokens with a trailing ``@@``; here the
    marker is stripped, while every word-final token gets a ``</w>`` suffix
    instead.  The four special tokens are then restored without any suffix.

    Args:
        lowercase__: mapping of fairseq token -> id.

    Returns:
        A new dict with rewritten keys; the input dict is not modified.
    """
    # The obfuscated original bound every local to `a_` and then read the
    # undefined names `d`/`da`; restore a coherent set of locals.
    d = lowercase__
    # "tok@@" (continuation) -> "tok"; any other token -> "tok</w>" (word end).
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens: drop the "</w>"-suffixed variants created
    # above and map the bare special token back to its original id.
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
    """Convert a fairseq FSMT checkpoint into a transformers FSMT dump.

    Intended pipeline: load the fairseq checkpoint via ``hub_utils``, rewrite
    the source/target dictionaries and BPE merges into tokenizer files, emit
    ``config.json``/``tokenizer_config.json``, rename and prune the state
    dict, sanity-load it into ``FSMTForConditionalGeneration`` and save it.

    NOTE(review): this file was machine-renamed — both parameters share the
    name ``lowercase__`` (not valid Python) and every local is bound to
    ``a_``, so the body reads names (``checkpoint_file``, ``args``,
    ``src_vocab`` …) that are never assigned.  The code below is kept
    byte-identical; confirm against the upstream conversion script.
    """
    assert os.path.exists(lowerCamelCase__ )
    os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
    print(F"""Writing results to {pytorch_dump_folder_path}""" )
    # handle various types of models
    a_ =basename(lowerCamelCase__ )
    a_ =dirname(lowerCamelCase__ )
    a_ =fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    a_ =cls.hub_models()
    a_ ={"bpe": "fastbpe", "tokenizer": "moses"}
    a_ ="."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(F"""using checkpoint {checkpoint_file}""" )
    a_ =hub_utils.from_pretrained(
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , archive_map=lowerCamelCase__ , **lowerCamelCase__ )
    a_ =vars(chkpt["args"]["model"] )
    a_ =args["source_lang"]
    a_ =args["target_lang"]
    a_ =dirname(lowerCamelCase__ )
    a_ =basename(lowerCamelCase__ )
    # dicts
    a_ =os.path.join(lowerCamelCase__ , F"""dict.{src_lang}.txt""" )
    a_ =os.path.join(lowerCamelCase__ , F"""dict.{tgt_lang}.txt""" )
    a_ =Dictionary.load(lowerCamelCase__ )
    a_ =rewrite_dict_keys(src_dict.indices )
    a_ =len(lowerCamelCase__ )
    a_ =os.path.join(lowerCamelCase__ , "vocab-src.json" )
    print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
    with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    a_ =True
    for k in src_vocab.keys():
        if not k.islower():
            a_ =False
            break
    a_ =Dictionary.load(lowerCamelCase__ )
    a_ =rewrite_dict_keys(tgt_dict.indices )
    a_ =len(lowerCamelCase__ )
    a_ =os.path.join(lowerCamelCase__ , "vocab-tgt.json" )
    print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
    with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
    # merges_file (bpecodes)
    a_ =os.path.join(lowerCamelCase__ , VOCAB_FILES_NAMES["merges_file"] )
    for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
        a_ =os.path.join(lowerCamelCase__ , lowerCamelCase__ )
        if os.path.exists(lowerCamelCase__ ):
            break
    with open(lowerCamelCase__ , encoding="utf-8" ) as fin:
        a_ =fin.read()
    a_ =re.sub(r" \d+$" , "" , lowerCamelCase__ , 0 , re.M ) # remove frequency number
    print(F"""Generating {merges_file}""" )
    with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as fout:
        fout.write(lowerCamelCase__ )
    # model config
    a_ =os.path.join(lowerCamelCase__ , "config.json" )
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args['bpe']}"""
    # NOTE(review): the message below says "bpe=" but interpolates
    # args['tokenizer']; it presumably should read "tokenizer=" — flagged here,
    # not changed in this documentation-only pass.
    assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args['tokenizer']}"""
    a_ ={
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    a_ =5
    a_ =False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        a_ =best_score_hparams[model_dir]["length_penalty"]
    else:
        a_ =1.0
    print(F"""Generating {fsmt_model_config_file}""" )
    with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
    # tokenizer config
    a_ =os.path.join(lowerCamelCase__ , lowerCamelCase__ )
    a_ ={
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1_0_2_4,
        "do_lower_case": do_lower_case,
    }
    print(F"""Generating {fsmt_tokenizer_config_file}""" )
    with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
    # model
    a_ =chkpt["models"][0]
    a_ =model.state_dict()
    # rename keys to start with 'model.'
    a_ =OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
    # remove unneeded keys
    a_ =[
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
    a_ =FSMTConfig.from_pretrained(lowerCamelCase__ )
    a_ =FSMTForConditionalGeneration(lowerCamelCase__ )
    # check that it loads ok
    model_new.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
    # save
    a_ =os.path.join(lowerCamelCase__ , lowerCamelCase__ )
    print(F"""Generating {pytorch_weights_dump_path}""" )
    torch.save(lowerCamelCase__ , lowerCamelCase__ )
    print("Conversion is done!" )
    print("\nLast step is to upload the files to s3" )
    print(F"""cd {data_root}""" )
    print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
    # NOTE(review): the renaming pass bound the parser and the parsed args to
    # `lowercase`, yet the calls below read `parser`/`args`, and the converter
    # is invoked as `convert_fsmt_checkpoint_to_pytorch` although the function
    # above is named `UpperCAmelCase_` — confirm against the upstream script.
    lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--fsmt_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help=(
            '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
            ''' bpecodes, etc.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowercase = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 714
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_(a, b):
    """Extended Euclidean algorithm.

    Returns a pair ``(x, y)`` such that ``a * x + b * y == gcd(a, b)``.
    The original block declared two parameters with the same name (a
    SyntaxError) and read undefined locals; both defects are fixed here.
    """
    # Base case: gcd(a, 0) == a, witnessed by a * 1 + 0 * 0.
    if b == 0:
        return (1, 0)
    # Recurse on (b, a mod b) and back-substitute the Bezout coefficients.
    (x, y) = UpperCAmelCase_(b, a % b)
    k = a // b
    return (y, x - k * y)
def UpperCAmelCase_(na, ra, nb, rb):
    """Chinese Remainder Theorem for two congruences.

    Returns the unique ``n`` in ``[0, na * nb)`` with ``n % na == ra`` and
    ``n % nb == rb``; ``na`` and ``nb`` must be coprime.  The obfuscated
    original had duplicate parameter names and called an undefined
    ``extended_euclid``; a private helper is supplied instead.
    """
    (x, y) = _extended_euclid_crt(na, nb)
    m = na * nb
    n = rb * x * na + ra * y * nb
    # Normalize into [0, m) even when n is negative.
    return (n % m + m) % m


def _extended_euclid_crt(a, b):
    """Helper: return (x, y) with a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = _extended_euclid_crt(b, a % b)
    return (y, x - (a // b) * y)
def UpperCAmelCase_(a, n):
    """Return the multiplicative inverse of ``a`` modulo ``n``.

    Requires ``gcd(a, n) == 1``.  The obfuscated original had duplicate
    parameter names and called an undefined ``extended_euclid``; a private
    helper is supplied instead.
    """
    (b, _x) = _extended_euclid_inv(a, n)
    # Bezout coefficient may be negative; map it into [0, n).
    if b < 0:
        b = (b % n + n) % n
    return b


def _extended_euclid_inv(a, b):
    """Helper: return (x, y) with a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = _extended_euclid_inv(b, a % b)
    return (y, x - (a // b) * y)
def UpperCAmelCase_(na, ra, nb, rb):
    """Chinese Remainder Theorem via modular inverses (variant of the above).

    Returns the unique ``n`` in ``[0, na * nb)`` with ``n % na == ra`` and
    ``n % nb == rb``; ``na`` and ``nb`` must be coprime.  The obfuscated
    original had duplicate parameter names and called an undefined
    ``invert_modulo``; private helpers are supplied instead.
    """
    x, y = _invert_modulo_crta(na, nb), _invert_modulo_crta(nb, na)
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m


def _invert_modulo_crta(a, n):
    """Helper: multiplicative inverse of a modulo n (gcd(a, n) == 1)."""
    (b, _x) = _ext_euclid_crta(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def _ext_euclid_crta(a, b):
    """Helper: return (x, y) with a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = _ext_euclid_crta(b, a % b)
    return (y, x - (a // b) * y)
if __name__ == "__main__":
    # NOTE(review): `testmod(name=...)` only labels the doctest report; the
    # functions in this file were all renamed to `UpperCAmelCase_` and no
    # longer carry doctests under the names listed below — confirm upstream.
    from doctest import testmod

    testmod(name='''chinese_remainder_theorem''', verbose=True)
    testmod(name='''chinese_remainder_theorem2''', verbose=True)
    testmod(name='''invert_modulo''', verbose=True)
    testmod(name='''extended_euclid''', verbose=True)
| 41
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowercase = logging.get_logger(__name__)
class UpperCAmelCase ( __a):
    """Image processor (resize / center-crop / rescale / BGR flip) for a
    MobileViT-style model.

    NOTE(review): the base class was mangled to ``__a`` (presumably
    ``BaseImageProcessor``) and every method declares several parameters with
    the same name ``lowerCAmelCase_`` — not valid Python — while bodies read
    the intended names (``size``, ``images`` …).  Kept byte-identical here;
    confirm against the upstream implementation.
    """
    # Model input name produced by preprocess().
    __magic_name__ : Optional[int] = ["pixel_values"]
    def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_5_5 , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = True , **lowerCAmelCase_ , ) -> None:
        """Store the default preprocessing configuration (resize, rescale,
        center-crop and channel-order flip) with MobileViT defaults."""
        super().__init__(**lowercase_)
        a_ =size if size is not None else {"shortest_edge": 2_2_4}
        a_ =get_size_dict(lowercase_ , default_to_square=lowercase_)
        a_ =crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
        a_ =get_size_dict(lowercase_ , param_name="crop_size")
        a_ =do_resize
        a_ =size
        a_ =resample
        a_ =do_rescale
        a_ =rescale_factor
        a_ =do_center_crop
        a_ =crop_size
        a_ =do_flip_channel_order
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PIL.Image.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``."""
        a_ =get_size_dict(lowercase_ , default_to_square=lowercase_)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        a_ =get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_)
        return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        a_ =get_size_dict(lowercase_)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_)
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> int:
        """Rescale pixel values by the given factor (e.g. 1/255)."""
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> np.ndarray:
        """Flip the channel order (RGB <-> BGR) of a single image."""
        return flip_channel_order(lowercase_ , data_format=lowercase_)
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> PIL.Image.Image:
        """Run the full preprocessing pipeline over a batch of images and
        return a ``BatchFeature`` with ``pixel_values``."""
        a_ =do_resize if do_resize is not None else self.do_resize
        a_ =resample if resample is not None else self.resample
        a_ =do_rescale if do_rescale is not None else self.do_rescale
        a_ =rescale_factor if rescale_factor is not None else self.rescale_factor
        a_ =do_center_crop if do_center_crop is not None else self.do_center_crop
        a_ =(
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        a_ =size if size is not None else self.size
        a_ =get_size_dict(lowercase_ , default_to_square=lowercase_)
        a_ =crop_size if crop_size is not None else self.crop_size
        a_ =get_size_dict(lowercase_ , param_name="crop_size")
        a_ =make_list_of_images(lowercase_)
        if not valid_images(lowercase_):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        a_ =[to_numpy_array(lowercase_) for image in images]
        if do_resize:
            a_ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
        if do_center_crop:
            a_ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
        if do_rescale:
            a_ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            a_ =[self.flip_channel_order(image=lowercase_) for image in images]
        a_ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
        a_ ={"pixel_values": images}
        return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Optional[Any]:
        """Turn model ``outputs.logits`` into per-image semantic-segmentation
        maps, optionally resized (bilinear) to the given target sizes."""
        a_ =outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase_) != len(lowercase_):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(lowercase_):
                a_ =target_sizes.numpy()
            a_ =[]
            for idx in range(len(lowercase_)):
                a_ =torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase_)
                a_ =resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(lowercase_)
        else:
            a_ =logits.argmax(dim=1)
            a_ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_(matrix):
    """Return True iff *matrix* equals its own conjugate transpose.

    The obfuscated original named its parameter ``lowercase__`` but read the
    undefined name ``matrix``; the parameter is renamed to match the body.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def UpperCAmelCase_(a, v):
    """Rayleigh quotient ``(v* A v) / (v* v)`` of matrix ``a`` at vector ``v``.

    ``v`` is a column vector; the result is a 1x1 ndarray.  The obfuscated
    original declared two parameters with the same name (a SyntaxError) and
    read undefined locals; both defects are fixed.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def UpperCAmelCase_():
    """Smoke tests: check hermiticity and print/verify Rayleigh quotients.

    NOTE(review): relies on module-level ``is_hermitian`` and
    ``rayleigh_quotient`` (the original block already called them by these
    names, although the functions in this file were renamed) — confirm the
    names before running.  The obfuscated original also read undefined
    locals ``a``/``v``; those bindings are restored here.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `tests` is not defined under that name here (the function
    # above was renamed to `UpperCAmelCase_`) — confirm against upstream.
    tests()
| 41
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class UpperCAmelCase ( unittest.TestCase):
    """Staging-hub round-trip tests: push a tiny FlaxBertModel to the Hub
    (user and org namespaces), re-download it, and verify the parameters are
    numerically identical.

    NOTE(review): locals were mangled to ``a_`` and call arguments to
    ``lowerCAmelCase_``; the bodies read the intended names (``model``,
    ``base_params`` …) that are never bound — kept byte-identical here.
    """
    @classmethod
    def lowercase_ ( cls) -> int:
        """Log in once for the whole class using the test token."""
        a_ =TOKEN
        HfFolder.save_token(lowerCAmelCase_)
    @classmethod
    def lowercase_ ( cls) -> Optional[int]:
        """Best-effort cleanup of the repos the tests create."""
        try:
            delete_repo(token=cls._token , repo_id="test-model-flax")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def lowercase_ ( self) -> Any:
        """Round-trip push/pull under the user namespace, twice: once via
        push_to_hub() and once via save_pretrained(push_to_hub=True)."""
        a_ =BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7)
        a_ =FlaxBertModel(lowerCAmelCase_)
        model.push_to_hub("test-model-flax" , use_auth_token=self._token)
        a_ =FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")
        a_ =flatten_dict(unfreeze(model.params))
        a_ =flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            a_ =(base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"""{key} not identical""")
        # Reset repo
        delete_repo(token=self._token , repo_id="test-model-flax")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(lowerCAmelCase_ , repo_id="test-model-flax" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token)
            a_ =FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")
            a_ =flatten_dict(unfreeze(model.params))
            a_ =flatten_dict(unfreeze(new_model.params))
            for key in base_params.keys():
                a_ =(base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"""{key} not identical""")
    def lowercase_ ( self) -> List[str]:
        """Same round-trip as above, but under the org namespace."""
        a_ =BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7)
        a_ =FlaxBertModel(lowerCAmelCase_)
        model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token)
        a_ =FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        a_ =flatten_dict(unfreeze(model.params))
        a_ =flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            a_ =(base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"""{key} not identical""")
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                lowerCAmelCase_ , repo_id="valid_org/test-model-flax-org" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token)
            a_ =FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
            a_ =flatten_dict(unfreeze(model.params))
            a_ =flatten_dict(unfreeze(new_model.params))
            for key in base_params.keys():
                a_ =(base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(lowerCAmelCase_ , 1e-3 , msg=f"""{key} not identical""")
def UpperCAmelCase_(model_a, model_b):
    """Return True iff the two Flax models have numerically identical params.

    Flattens both parameter trees and compares every leaf; a leaf whose
    summed absolute difference exceeds 1e-4 marks the models as different.

    Bug fixed: the obfuscated original bound both flattened trees to the
    same name and then computed ``flat[key] - flat[key]`` (always zero), so
    it unconditionally returned True.  It also declared two parameters with
    the same name, which is a SyntaxError.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        # Compare corresponding leaves of model_a vs model_b.
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class UpperCAmelCase ( unittest.TestCase):
    """Tests that FlaxBertModel.from_pretrained honors the ``subfolder``
    argument (local dirs, sharded checkpoints, and Hub repos).

    NOTE(review): locals were mangled to ``a_`` and call arguments to
    ``lowerCAmelCase_``; bodies read names that are never bound — kept
    byte-identical here, confirm against the upstream test file.
    """
    def lowercase_ ( self) -> int:
        """Save into a local subfolder; loading from the root must fail and
        loading with subfolder= must reproduce the model."""
        a_ =BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        a_ =FlaxBertModel(lowerCAmelCase_)
        a_ ="bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_))
            with self.assertRaises(lowerCAmelCase_):
                a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_)
            a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_)
            self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_))
    def lowercase_ ( self) -> Any:
        """Same as above but with a sharded checkpoint (max_shard_size)."""
        a_ =BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        a_ =FlaxBertModel(lowerCAmelCase_)
        a_ ="bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_) , max_shard_size="10KB")
            with self.assertRaises(lowerCAmelCase_):
                a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_)
            a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_)
            self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_))
    def lowercase_ ( self) -> int:
        """Load a Hub repo that stores the model in a subfolder."""
        a_ ="bert"
        a_ ="hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(lowerCAmelCase_):
            a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_)
        a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_)
        self.assertIsNotNone(lowerCAmelCase_)
    def lowercase_ ( self) -> Dict:
        """Load a sharded Hub repo that stores the model in a subfolder."""
        a_ ="bert"
        a_ ="hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(lowerCAmelCase_):
            a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_)
        a_ =FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_)
        self.assertIsNotNone(lowerCAmelCase_)
| 716
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_(board, row, column):
    """Return True if a queen can be placed at (row, column) on *board*.

    Checks the target row, the target column, and both upward diagonals
    (queens are placed top-down, so only rows above can conflict).  The
    obfuscated original declared three parameters with the same name (a
    SyntaxError) and read undefined locals; both defects are fixed.
    """
    # Any queen already in this row?
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # Any queen already in this column?
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def UpperCAmelCase_(board, row):
    """Backtracking N-queens search, placing one queen per row.

    Appends each complete placement to the module-level ``solution`` list
    and prints the board.  NOTE(review): relies on module-level ``is_safe``,
    ``printboard``, ``solve`` and ``solution`` — the original block already
    referenced these names although the surrounding file's definitions were
    renamed; confirm before running.  The obfuscated original also assigned
    the queen placement to a throwaway local (``a_ =1``) instead of the
    board cell; that is restored here.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            # Place the queen, recurse into the next row, then backtrack.
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def UpperCAmelCase_(board):
    """Print *board* with 'Q' for queens and '.' for empty squares.

    The obfuscated original read the undefined name ``board``; the parameter
    is renamed to match the body.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
# NOTE(review): the renaming pass bound both the board size and the board to
# `lowercase`, yet the comprehension reads `n`, and `solve`/`board`/`solution`
# are not defined under those names in this file — confirm against upstream.
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 41
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase ( __a):
    """Configuration for the BLIP text model.

    NOTE(review): the base class was mangled to ``__a`` (presumably
    ``PretrainedConfig``), all ``__init__`` parameters share the name
    ``lowerCAmelCase_`` (a SyntaxError), and the bodies read the intended
    parameter names that are never bound — kept byte-identical here.
    """
    # HF model-type identifier used for config dispatch.
    __magic_name__ : Optional[int] = 'blip_text_model'
    def __init__( self , lowerCAmelCase_=3_0_5_2_4 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=3_0_7_2 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=1_2 , lowerCAmelCase_=8 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_="gelu" , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=3_0_5_2_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0 , lowerCAmelCase_=1_0_2 , lowerCAmelCase_=True , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> List[str]:
        """Initialize the text-model configuration with BLIP defaults."""
        super().__init__(
            pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , sep_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
        a_ =vocab_size
        a_ =hidden_size
        a_ =encoder_hidden_size
        a_ =intermediate_size
        a_ =projection_dim
        a_ =hidden_dropout_prob
        a_ =num_hidden_layers
        a_ =num_attention_heads
        a_ =max_position_embeddings
        a_ =layer_norm_eps
        a_ =hidden_act
        a_ =initializer_range
        a_ =attention_probs_dropout_prob
        a_ =is_decoder
        a_ =use_cache
    @classmethod
    def lowercase_ ( cls , lowerCAmelCase_ , **lowerCAmelCase_) -> int:
        """from_pretrained helper: load this config from a checkpoint,
        unwrapping the nested text config when given a full BlipConfig."""
        cls._set_token_in_kwargs(UpperCAmelCase__)
        a_ =cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            a_ =config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCAmelCase ( __a):
    """Configuration for the BLIP vision model.

    NOTE(review): same obfuscation damage as the text config above — base
    class mangled to ``__a``, duplicate ``lowerCAmelCase_`` parameters, and
    bodies reading unbound names; kept byte-identical here.
    """
    # HF model-type identifier used for config dispatch.
    __magic_name__ : Tuple = 'blip_vision_model'
    def __init__( self , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=3_0_7_2 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3_8_4 , lowerCAmelCase_=1_6 , lowerCAmelCase_="gelu" , lowerCAmelCase_=1e-5 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1e-10 , **lowerCAmelCase_ , ) -> Optional[Any]:
        """Initialize the vision-model configuration with BLIP defaults."""
        super().__init__(**UpperCAmelCase__)
        a_ =hidden_size
        a_ =intermediate_size
        a_ =projection_dim
        a_ =num_hidden_layers
        a_ =num_attention_heads
        a_ =patch_size
        a_ =image_size
        a_ =initializer_range
        a_ =attention_dropout
        a_ =layer_norm_eps
        a_ =hidden_act
    @classmethod
    def lowercase_ ( cls , lowerCAmelCase_ , **lowerCAmelCase_) -> List[str]:
        """from_pretrained helper: load this config from a checkpoint,
        unwrapping the nested vision config when given a full BlipConfig."""
        cls._set_token_in_kwargs(UpperCAmelCase__)
        a_ =cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            a_ =config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCAmelCase ( __a):
    """Composite BLIP configuration combining text and vision sub-configs.

    NOTE(review): same obfuscation damage as above; additionally the two
    class attributes both got renamed to ``__magic_name__`` (the second
    clobbers the first — presumably ``model_type`` and
    ``is_composition`` upstream), and ``BlipTextConfig``/``BlipVisionConfig``
    are not defined under those names in this file — confirm upstream.
    """
    __magic_name__ : List[str] = 'blip'
    __magic_name__ : Tuple = True
    def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2.6_5_9_2 , lowerCAmelCase_=2_5_6 , **lowerCAmelCase_ , ) -> Optional[Any]:
        """Build the sub-configs (defaulting to empty dicts) and record the
        projection/logit-scale hyperparameters."""
        super().__init__(**UpperCAmelCase__)
        if text_config is None:
            a_ ={}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
        if vision_config is None:
            a_ ={}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")
        a_ =BlipTextConfig(**UpperCAmelCase__)
        a_ =BlipVisionConfig(**UpperCAmelCase__)
        a_ =self.vision_config.hidden_size
        a_ =projection_dim
        a_ =logit_scale_init_value
        a_ =1.0
        a_ =0.0_2
        a_ =image_text_hidden_size
    @classmethod
    def lowercase_ ( cls , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> int:
        """Alternate constructor from existing text and vision configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase__)
    def lowercase_ ( self) -> List[Any]:
        """Serialize to a plain dict, expanding nested sub-configs."""
        a_ =copy.deepcopy(self.__dict__)
        a_ =self.text_config.to_dict()
        a_ =self.vision_config.to_dict()
        a_ =self.__class__.model_type
        return output
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_(masked_input, model, tokenizer, topk=5):
    """Fill the single ``<mask>`` token in *masked_input* with a masked LM.

    Args:
        masked_input: sentence containing exactly one ``<mask>`` token.
        model: a masked-LM model (e.g. CamembertForMaskedLM).
        tokenizer: the matching tokenizer.
        topk: number of candidate fillings to return.

    Returns:
        A list of ``(filled_sentence, probability, predicted_token)`` tuples,
        best candidate first.

    The obfuscated original declared three parameters with the same name (a
    SyntaxError) and read unbound locals; the intended bindings are restored.
    """
    assert masked_input.count("<mask>") == 1
    # Batch size 1; add_special_tokens so the model sees <s> ... </s>.
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece uses U+2581 as the word-boundary marker.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
# NOTE(review): the renaming pass bound the tokenizer, model and input all to
# `lowercase`, yet `model.eval()` and the `fill_mask(...)` call read
# `model`/`masked_input`/`tokenizer`, and `fill_mask` is not defined under
# that name in this file — confirm against the upstream example script.
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
import math
import qiskit
def UpperCAmelCase_(input_1=1, input_2=1, carry_in=1):
    """Simulate a quantum full adder on two bits plus a carry-in.

    Each input may be 0, 1 or 2; the value 2 places that qubit into
    superposition via a Hadamard gate.  Returns the measurement-count dict
    (sum and carry-out qubits) from 1000 simulator shots.

    Defects fixed from the obfuscated original: three parameters shared one
    name (a SyntaxError), the type guards compared values against themselves
    (``isinstance(x, x)``), and the gate targets were lost.
    """
    # Reject non-integer inputs explicitly (bool is an int subclass and is
    # accepted, matching the original integer-only contract).
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_0_0_0)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # NOTE(review): `quantum_full_adder` is not defined under that name here
    # (the function above was renamed to `UpperCAmelCase_`) — confirm upstream.
    print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import scaffolding for the RAG model package.
# NOTE(review): the renaming pass bound the import structure and the backend
# lists all to `lowercase`, so the later additions overwrite each other and
# `_import_structure` at the bottom is never defined; upstream this pattern
# assigns `sys.modules[__name__] = _LazyModule(...)` — confirm before use.
lowercase = {
    '''configuration_rag''': ['''RagConfig'''],
    '''retrieval_rag''': ['''RagRetriever'''],
    '''tokenization_rag''': ['''RagTokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch backend symbols.
    lowercase = [
        '''RagModel''',
        '''RagPreTrainedModel''',
        '''RagSequenceForGeneration''',
        '''RagTokenForGeneration''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow backend symbols.
    lowercase = [
        '''TFRagModel''',
        '''TFRagPreTrainedModel''',
        '''TFRagSequenceForGeneration''',
        '''TFRagTokenForGeneration''',
    ]
if TYPE_CHECKING:
    # Eager imports for type checkers only.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys
    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase):
    '''Integration tests comparing FlaxUNet2DConditionModel outputs on pinned
    Stable Diffusion checkpoints against precomputed reference slices.

    NOTE(review): identifiers in this block appear machine-mangled — several
    methods declare duplicate `lowerCAmelCase_` parameters (a SyntaxError) and
    reference names (`seed`, `shape`, `SCREAMING_SNAKE_CASE_`, `fpaa`,
    `latents`) that are never defined in the visible scope. Confirm against
    the upstream diffusers test before running.'''

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
        """Build the .npy fixture filename for a given seed and tensor shape."""
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(SCREAMING_SNAKE_CASE_) for s in shape])}.npy"""

    def lowercase_ ( self) -> Tuple:
        """Free accumulated memory between tests."""
        super().tearDown()
        gc.collect()

    def lowercase_ ( self , lowerCAmelCase_=0 , lowerCAmelCase_=(4, 4, 6_4, 6_4) , lowerCAmelCase_=False) -> List[str]:
        """Load a deterministic latent-noise fixture from the Hub."""
        a_ =jnp.bfloataa if fpaa else jnp.floataa
        a_ =jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)) , dtype=SCREAMING_SNAKE_CASE_)
        return image

    def lowercase_ ( self , lowerCAmelCase_=False , lowerCAmelCase_="CompVis/stable-diffusion-v1-4") -> Any:
        """Load the Flax UNet (optionally its bf16 revision) and its params."""
        a_ =jnp.bfloataa if fpaa else jnp.floataa
        a_ ="bf16" if fpaa else None
        a_ , a_ =FlaxUNetaDConditionModel.from_pretrained(
            SCREAMING_SNAKE_CASE_ , subfolder="unet" , dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_)
        return model, params

    def lowercase_ ( self , lowerCAmelCase_=0 , lowerCAmelCase_=(4, 7_7, 7_6_8) , lowerCAmelCase_=False) -> Optional[int]:
        """Load deterministic text-encoder hidden-state fixtures from the Hub."""
        a_ =jnp.bfloataa if fpaa else jnp.floataa
        a_ =jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)) , dtype=SCREAMING_SNAKE_CASE_)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
            [1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
            [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
            [3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
            # fmt: on
        ])
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Dict:
        """SD v1.4 (fp16) UNet: output slice must match the pinned reference."""
        a_ , a_ =self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=SCREAMING_SNAKE_CASE_)
        a_ =self.get_latents(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_)
        a_ =self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_)
        a_ =model.apply(
            {"params": params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
        assert sample.shape == latents.shape
        a_ =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
        a_ =jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
            [1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
            [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
            [3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
            # fmt: on
        ])
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
        """SD v2 (fp16) UNet: output slice must match the pinned reference."""
        a_ , a_ =self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=SCREAMING_SNAKE_CASE_)
        a_ =self.get_latents(SCREAMING_SNAKE_CASE_ , shape=(4, 4, 9_6, 9_6) , fpaa=SCREAMING_SNAKE_CASE_)
        a_ =self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , shape=(4, 7_7, 1_0_2_4) , fpaa=SCREAMING_SNAKE_CASE_)
        a_ =model.apply(
            {"params": params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
        assert sample.shape == latents.shape
        a_ =jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
        a_ =jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2)
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
# Descriptive alias used by the solver below; a frozenset makes the
# membership test O(1).
TRIANGULAR_NUMBERS = frozenset(lowercase)


def UpperCAmelCase_(words=None):
    """Project Euler 42: count the 'triangle words' in a word list.

    A word's value is the sum of its letters' alphabetical positions
    (A=1 ... Z=26); a word is a triangle word when that value is a
    triangular number.

    Fixes: the original called os.path.realpath / ord / len on the
    undefined name ``lowercase__`` (NameError at runtime) and referenced
    an undefined ``TRIANGULAR_NUMBERS``; the intended arguments
    (``__file__``, each character, the filtered list) are restored.

    :param words: optional iterable of upper-case words; when None, the
        comma-separated, double-quoted word list is read from
        ``words.txt`` next to this script (the original behaviour).
    :return: the number of triangle words.
    """
    if words is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(script_dir, "words.txt")) as f:
            line = f.readline()
        words = [word.strip('"') for word in line.strip("\r\n").split(",")]
    # ord(ch) - 64 maps "A" -> 1 ... "Z" -> 26.
    return len([word for word in words if sum(ord(ch) - 64 for ch in word) in TRIANGULAR_NUMBERS])
# Script entry point.
# Fixed: the module defines the solver as UpperCAmelCase_; the original
# called an undefined name `solution()` (NameError).
if __name__ == "__main__":
    print(UpperCAmelCase_())
| 41
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase(unittest.TestCase):
    """Configuration holder shared by the image-processor tests below.

    Fixes: the original ``__init__`` declared every parameter under the same
    mangled name ``lowerCAmelCase_`` (a SyntaxError) and bound each value to
    a throwaway local ``a_`` instead of an attribute. Parameter names are
    restored from the attributes the sibling test class reads
    (``batch_size``, ``num_channels``, ``size``, ...), and the mutable list
    defaults are now created per call.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ) -> None:
        super().__init__()
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size matches the original hard-coded fallback.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def lowercase_(self) -> dict:
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCAmelCase ( _snake_case , unittest.TestCase):
    '''Tests for a ViT-style image processor: config attributes plus
    PIL / numpy / torch input handling.

    NOTE(review): identifiers look machine-mangled — the base class
    `_snake_case`, the name `snake_case_` used throughout, and
    `EfficientFormerImageProcessorTester` are not defined in the visible
    scope, and results are bound to a throwaway `a_` while the assertions
    read `image_processor` / `image_inputs` / `encoded_images`. Upstream
    this mixes in ImageProcessingSavingTestMixin.'''

    __magic_name__ : Tuple = ViTImageProcessor if is_vision_available() else None

    def lowercase_ ( self) -> Any:
        """Create the shared tester/config object."""
        a_ =EfficientFormerImageProcessorTester(self)

    @property
    def lowercase_ ( self) -> Optional[int]:
        """Keyword arguments used to build the image processor under test."""
        return self.image_proc_tester.prepare_image_processor_dict()

    def lowercase_ ( self) -> List[str]:
        """The processor must expose the standard configuration attributes."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(snake_case_ , "image_mean"))
        self.assertTrue(hasattr(snake_case_ , "image_std"))
        self.assertTrue(hasattr(snake_case_ , "do_normalize"))
        self.assertTrue(hasattr(snake_case_ , "do_resize"))
        self.assertTrue(hasattr(snake_case_ , "size"))

    def lowercase_ ( self) -> Union[str, Any]:
        """Intentionally empty placeholder (kept for parity with the mixin)."""
        pass

    def lowercase_ ( self) -> Optional[int]:
        """PIL input: single image and batch produce correctly shaped tensors."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        a_ =prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case_)
        for image in image_inputs:
            self.assertIsInstance(snake_case_ , Image.Image)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        a_ =image_processor(snake_case_ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def lowercase_ ( self) -> List[str]:
        """numpy input: single array and batch produce correctly shaped tensors."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        a_ =prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case_ , numpify=snake_case_)
        for image in image_inputs:
            self.assertIsInstance(snake_case_ , np.ndarray)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        a_ =image_processor(snake_case_ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def lowercase_ ( self) -> Union[str, Any]:
        """torch input: single tensor and batch produce correctly shaped tensors."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        a_ =prepare_image_inputs(self.image_proc_tester , equal_resolution=snake_case_ , torchify=snake_case_)
        for image in image_inputs:
            self.assertIsInstance(snake_case_ , torch.Tensor)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        a_ =image_processor(snake_case_ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_(model_type, use_small=False):
    """Return the local cache path for the checkpoint of `model_type`.

    Fixes: the original signature declared the parameter ``lowercase__``
    twice (a SyntaxError) while the body already read ``model_type`` and
    ``use_small``, and the first assignment bound a throwaway ``a_``
    leaving ``key`` undefined. The join target is the module-level
    ``CACHE_DIR`` (as in the upstream bark conversion script — confirm).

    :param model_type: one of "text", "coarse", "fine".
    :param use_small: select the small checkpoint variant.
    :return: absolute path of the cached checkpoint file.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def UpperCAmelCase_(from_hf_path, file_name):
    """Download `file_name` from the Hub repo `from_hf_path` into CACHE_DIR.

    Fixes: the original signature declared the parameter ``lowercase__``
    twice (a SyntaxError) and passed that undefined name everywhere; the
    parameter names and the ``CACHE_DIR`` targets are restored from the
    upstream bark conversion script.

    :param from_hf_path: Hub repo id (e.g. "suno/bark").
    :param file_name: checkpoint file name inside the repo.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
    '''Load one original bark sub-model checkpoint into its HF model class.

    NOTE(review): machine-mangled — the signature repeats ``lowercase__``
    (a SyntaxError); the body reads ``model_type`` / ``use_small`` and
    device/path values that the throwaway ``a_`` assignments never bind.
    Upstream signature: ``_load_model(ckpt_path, device, use_small=False,
    model_type="text")`` — confirm against the bark conversion script.
    '''
    # Pick the HF model / config / generation-config classes per sub-model.
    if model_type == "text":
        a_ =BarkSemanticModel
        a_ =BarkSemanticConfig
        a_ =BarkSemanticGenerationConfig
    elif model_type == "coarse":
        a_ =BarkCoarseModel
        a_ =BarkCoarseConfig
        a_ =BarkCoarseGenerationConfig
    elif model_type == "fine":
        a_ =BarkFineModel
        a_ =BarkFineConfig
        a_ =BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    a_ =F"""{model_type}_small""" if use_small else model_type
    a_ =REMOTE_MODEL_PATHS[model_key]
    # Download the original checkpoint on first use.
    if not os.path.exists(lowercase__ ):
        logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info["repo_id"] , model_info["file_name"] )
    a_ =torch.load(lowercase__ , map_location=lowercase__ )
    # this is a hack
    a_ =checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        a_ =model_args["vocab_size"]
        a_ =model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    a_ =model_args.pop("n_head" )
    a_ =model_args.pop("n_embd" )
    a_ =model_args.pop("n_layer" )
    a_ =ConfigClass(**checkpoint["model_args"] )
    a_ =ModelClass(config=lowercase__ )
    a_ =GenerationConfigClass()
    a_ =model_generation_config
    a_ =checkpoint["model"]
    # fixup checkpoint
    a_ ="_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(lowercase__ ):
            # replace part of the key with corresponding layer name in HF implementation
            a_ =k[len(lowercase__ ) :]
            for old_layer_name in new_layer_name_dict:
                a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
            a_ =state_dict.pop(lowercase__ )
    # The converted state dict must cover the HF model exactly (fused
    # attention bias buffers are allowed to differ on both sides).
    a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
    a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
    a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
    a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(lowercase__ ) != 0:
        raise ValueError(F"""extra keys found: {extra_keys}""" )
    if len(lowercase__ ) != 0:
        raise ValueError(F"""missing keys: {missing_keys}""" )
    model.load_state_dict(lowercase__ , strict=lowercase__ )
    a_ =model.num_parameters(exclude_embeddings=lowercase__ )
    a_ =checkpoint["best_val_loss"].item()
    logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
    model.eval()
    model.to(lowercase__ )
    del checkpoint, state_dict
    return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
    '''Convert one bark sub-model and sanity-check it against the original.

    NOTE(review): machine-mangled — the signature repeats ``lowercase__``
    (a SyntaxError) and the body reads names (``model_type``, ``model``,
    ``bark_model``, ``batch_size``, ...) that the throwaway ``a_``
    assignments never bind. Upstream signature:
    ``load_model(pytorch_dump_folder_path, use_small=False, model_type="text")``.
    '''
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    a_ ="cpu"  # do conversion on cpu
    a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
    a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
    # load bark initial model
    a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
    if model_type == "text":
        a_ =bark_model["model"]
    if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )
    # check if same output as the bark model
    a_ =5
    a_ =1_0
    if model_type in ["text", "coarse"]:
        a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
        a_ =bark_model(lowercase__ )[0]
        a_ =model(lowercase__ )
        # take last logits
        a_ =output_new_model_total.logits[:, [-1], :]
    else:
        a_ =3
        a_ =8
        a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        a_ =model(lowercase__ , lowercase__ )
        a_ =bark_model(lowercase__ , lowercase__ )
        a_ =output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError("initial and new outputs are not equal" )
    Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
    model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
    '''Assemble a full BarkModel from the three converted sub-models plus the
    EnCodec codec, then save (and optionally push) the combined checkpoint.

    NOTE(review): machine-mangled — the signature repeats ``lowercase__``
    (a SyntaxError) and the body reads names (``semantic_config``,
    ``coarseAcoustic``, ``bark``, ...) that the throwaway ``a_``
    assignments never bind. Upstream this takes the three sub-model
    folders, the destination folder and the repo id.
    '''
    a_ =os.path.join(lowercase__ , lowercase__ )
    a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
    a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
    a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
    a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    a_ =BarkSemanticModel.from_pretrained(lowercase__ )
    a_ =BarkCoarseModel.from_pretrained(lowercase__ )
    a_ =BarkFineModel.from_pretrained(lowercase__ )
    a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
    a_ =BarkConfig.from_sub_model_configs(
        lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    a_ =BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    a_ =BarkModel(lowercase__ )
    a_ =semantic
    a_ =coarseAcoustic
    a_ =fineAcoustic
    a_ =codec
    a_ =bark_generation_config
    Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
    bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
# Script entry point: parse CLI args and run the conversion.
# NOTE(review): identifiers look machine-mangled — the parser is bound to
# `lowercase` but used as `parser`, the parsed namespace is bound to
# `lowercase` but read as `args`, and `load_model` is not the name of any
# function defined above (they are all `UpperCAmelCase_`).
if __name__ == "__main__":
    lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    lowercase = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
import torch
from transformers import AutoModel
class UpperCAmelCase ( torch.nn.Module):
    '''Few-shot NER scorer: a BERT encoder plus cosine-similarity/softmax
    heads that score start/end token positions of query spans against
    support spans.

    NOTE(review): identifiers are machine-mangled — `UpperCAmelCase__` is
    never defined, two methods repeat the `lowerCAmelCase_` parameter (a
    SyntaxError), and every assignment targets a throwaway `a_` while the
    body reads `self.BERT`, `W_supports`, `q`, `S`, etc. Appears to mirror
    sayef/fsner's model — confirm against that source.'''

    def __init__( self , lowerCAmelCase_="sayef/fsner-bert-base-uncased") -> Tuple:
        """Load the pretrained encoder and the similarity/softmax heads."""
        super(UpperCAmelCase__ , self).__init__()
        a_ =AutoModel.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__)
        a_ =torch.nn.CosineSimilarity(3 , 1e-08)
        a_ =torch.nn.Softmax(dim=1)

    def lowercase_ ( self , **lowerCAmelCase_) -> Tuple:
        """Encode the tokenized inputs and return the last hidden state."""
        return self.bert(**UpperCAmelCase__).last_hidden_state

    def lowercase_ ( self , lowerCAmelCase_) -> Tuple:
        """Sum token embeddings over axis 2 (keeping the dimension)."""
        return token_embeddings.sum(2 , keepdim=UpperCAmelCase__)

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1) -> List[str]:
        """Temperature-scaled softmax over cosine similarities."""
        return self.softmax(T * self.cos(UpperCAmelCase__ , UpperCAmelCase__))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Score start/end positions of query spans against support spans."""
        a_ =W_supports['''sizes'''].tolist()
        a_ =W_supports['''start_token_id'''].item()
        a_ =W_supports['''end_token_id'''].item()
        # The bookkeeping keys must not be forwarded to the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        a_ =self.BERT(**UpperCAmelCase__)
        a_ =self.BERT(**UpperCAmelCase__)
        a_ =None
        a_ =None
        a_ =W_supports['''input_ids'''] == start_token_id
        a_ =W_supports['''input_ids'''] == end_token_id
        # Walk the flattened support batch in per-entity chunks of `size`.
        for i, size in enumerate(UpperCAmelCase__):
            if i == 0:
                a_ =0
            else:
                a_ =support_sizes[i - 1]
            a_ =S[s : s + size][start_token_masks[s : s + size]]
            a_ =S[s : s + size][end_token_masks[s : s + size]]
            a_ =torch.matmul(q[i] , s_start.T).sum(1).softmax(0)
            a_ =torch.matmul(q[i] , s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                a_ =torch.vstack((p_starts, p_start))
                a_ =torch.vstack((p_ends, p_end))
            else:
                a_ =p_start
                a_ =p_end
        return p_starts, p_ends
| 721
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_(lowercase__):
    """Return True iff `lowercase__` uses each of the digits 1-9 exactly once.

    Fix: the original computed ``str(lowercase__)`` but then called
    ``len``/``set`` on the raw integer argument, raising TypeError at
    runtime; the checks now run on the digit string.

    :param lowercase__: the integer to test.
    :return: True when the decimal representation is 9-pandigital.
    """
    digits = str(lowercase__)
    return len(digits) == 9 and set(digits) == set("123456789")
def UpperCAmelCase_():
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Searches 4-digit multiplicands first (candidate = n * 100002 is the
    concatenation of n and 2n), then 3-digit ones (n * 1002003 concatenates
    n, 2n and 3n), scanning downwards so the first hit is the maximum.

    Fix: the original called an undefined name ``is_9_pandigital`` (the
    predicate defined above was mangled to a different name and then
    shadowed by this definition); the check is now a local helper so this
    function is self-contained.

    :return: the largest pandigital concatenated product, or None.
    """

    def is_9_pandigital(num):
        # True iff num uses each digit 1-9 exactly once.
        digits = str(num)
        return len(digits) == 9 and set(digits) == set("123456789")

    # 4-digit base: concat(n, 2n) == n * 100002 for 1000 <= n <= 9999.
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # 3-digit base: concat(n, 2n, 3n) == n * 1002003 for 100 <= n <= 333.
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
# Script entry point.
# Fixed: the solver above is named UpperCAmelCase_; the original f-string
# evaluated an undefined name `solution()` (NameError). The printed label
# is kept as the intended "solution() = ..." text.
if __name__ == "__main__":
    print(f"solution() = {UpperCAmelCase_()}")
| 41
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase_ ( lowercase__ ):
    # Build a FocalNetConfig matching a released checkpoint name.
    # NOTE(review): machine-mangled — the parameter is `lowercase__` but the
    # body reads `model_name`, every helper call passes the undefined
    # `_lowerCAmelCase`, and results are bound to a throwaway `a_`. Upstream
    # this derives depths / focal levels & windows / embed_dim / label maps
    # from the model name — confirm against the HF conversion script.
    a_ =[2, 2, 6, 2] if "tiny" in model_name else [2, 2, 1_8, 2]
    a_ =True if "large" in model_name or "huge" in model_name else False
    a_ =True if "large" in model_name or "huge" in model_name else False
    a_ =True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            a_ =[3, 3, 3, 3]
            a_ =[5, 5, 5, 5]
        elif "fl4" in model_name:
            a_ =[4, 4, 4, 4]
            a_ =[3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        a_ =[3, 3, 3, 3]
        if "lrf" in model_name:
            a_ =[3, 3, 3, 3]
        else:
            a_ =[2, 2, 2, 2]
    # Embedding dimension scales with model size.
    if "tiny" in model_name:
        a_ =9_6
    elif "small" in model_name:
        a_ =9_6
    elif "base" in model_name:
        a_ =1_2_8
    elif "large" in model_name:
        a_ =1_9_2
    elif "xlarge" in model_name:
        a_ =2_5_6
    elif "huge" in model_name:
        a_ =3_5_2
    # set label information
    a_ ="huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        a_ ="imagenet-22k-id2label.json"
    else:
        a_ ="imagenet-1k-id2label.json"
    a_ =json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
    a_ ={int(_lowerCAmelCase ): v for k, v in idalabel.items()}
    a_ ={v: k for k, v in idalabel.items()}
    a_ =FocalNetConfig(
        embed_dim=_lowerCAmelCase , depths=_lowerCAmelCase , focal_levels=_lowerCAmelCase , focal_windows=_lowerCAmelCase , use_conv_embed=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase , use_post_layernorm=_lowerCAmelCase , use_layerscale=_lowerCAmelCase , )
    return config
def UpperCAmelCase_(name):
    """Map a parameter key from the original FocalNet state dict to its
    transformers name.

    Fixes: the original signature took a parameter ``lowercase__`` that the
    body never used — every reference was to the then-undefined name
    ``name`` — and each rewrite was lost in a throwaway ``a_`` assignment.
    The parameter and the in-place rebinding are restored so the function
    actually transforms its input.

    :param name: parameter name from the original checkpoint.
    :return: the corresponding HF FocalNet parameter name.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    # Classifier head keeps its own prefix; every other key is nested
    # under the "focalnet." backbone.
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False ):
    # Convert a released FocalNet checkpoint to the HF format and verify it.
    # NOTE(review): machine-mangled — the signature repeats `lowercase__`
    # (a SyntaxError); the body reads `model_name` / `pytorch_dump_folder_path`
    # / `push_to_hub`, passes the undefined `_lowerCAmelCase` everywhere, and
    # binds results to a throwaway `a_`. Intended flow (upstream script):
    # download state dict -> rename keys -> load into
    # FocalNetForImageClassification -> check pixel values and logits on the
    # COCO cats image -> optionally save and/or push.
    # fmt: off
    a_ ={
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    a_ =model_name_to_url[model_name]
    print("Checkpoint URL: " , _lowerCAmelCase )
    a_ =torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        a_ =state_dict.pop(_lowerCAmelCase )
        a_ =val
    a_ =get_focalnet_config(_lowerCAmelCase )
    a_ =FocalNetForImageClassification(_lowerCAmelCase )
    model.eval()
    # load state dict
    model.load_state_dict(_lowerCAmelCase )
    # verify conversion
    a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
    a_ =BitImageProcessor(
        do_resize=_lowerCAmelCase , size={"shortest_edge": 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase , crop_size=2_2_4 , do_normalize=_lowerCAmelCase , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase , )
    a_ =Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
    a_ =processor(images=_lowerCAmelCase , return_tensors="pt" )
    a_ =transforms.Compose(
        [
            transforms.Resize(2_5_6 ),
            transforms.CenterCrop(2_2_4 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    a_ =image_transforms(_lowerCAmelCase ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , _lowerCAmelCase , atol=1E-4 )
    a_ =model(**_lowerCAmelCase )
    a_ =outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # Pinned reference logit slices per checkpoint.
    if model_name == "focalnet-tiny":
        a_ =torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        a_ =torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        a_ =torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        a_ =torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        a_ =torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        a_ =torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(_lowerCAmelCase )
        processor.save_pretrained(_lowerCAmelCase )
    if push_to_hub:
        print(F"""Pushing model and processor of {model_name} to the hub...""" )
        model.push_to_hub(F"""{model_name}""" )
        processor.push_to_hub(F"""{model_name}""" )
# Script entry point for the FocalNet conversion CLI.
# NOTE(review): machine-mangled — the parser and the parsed namespace are
# bound to `lowercase` but used as `parser` / `args`, and
# `convert_focalnet_checkpoint` is not the name of any function defined
# above (they are all `UpperCAmelCase_`).
if __name__ == "__main__":
    lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )
    lowercase = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
    '''Mixin for diffusers UNet-block tests: builds dummy inputs, checks the
    forward output slice, and verifies the block supports a backward pass.

    NOTE(review): `self.block_type`, `self.block_class` and
    `self.output_shape` are expected from the concrete subclass; one method
    below declares duplicate `lowerCAmelCase_` parameters (a SyntaxError)
    while its body reads `include_temb` / `include_res_hidden_states_tuple`
    / `include_encoder_hidden_states` / `include_skip_sample`, and results
    are bound to a throwaway `a_`.'''

    @property
    def lowercase_ ( self) -> Any:
        """Default dummy input for the block under test."""
        return self.get_dummy_input()

    @property
    def lowercase_ ( self) -> List[str]:
        """Expected output shape per block type (down / mid / up)."""
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
        """Build the kwargs dict fed to the block's forward pass."""
        a_ =4
        a_ =3_2
        a_ =(3_2, 3_2)
        a_ =torch.manual_seed(0)
        a_ =torch.device(lowerCAmelCase_)
        a_ =(batch_size, num_channels) + sizes
        a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        a_ ={"hidden_states": hidden_states}
        if include_temb:
            a_ =1_2_8
            a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        if include_res_hidden_states_tuple:
            a_ =torch.manual_seed(1)
            a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
        if include_encoder_hidden_states:
            a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
        if include_skip_sample:
            a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        return dummy_input

    def lowercase_ ( self) -> Optional[int]:
        """Common init kwargs plus inputs; mid blocks take no out_channels."""
        a_ ={
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        if self.block_type == "up":
            a_ =3_2
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        a_ =self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self , lowerCAmelCase_) -> Dict:
        """Forward pass: output shape and trailing slice must match expectations."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        unet_block.to(lowerCAmelCase_)
        unet_block.eval()
        with torch.no_grad():
            a_ =unet_block(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ =output[0]
        self.assertEqual(output.shape , self.output_shape)
        a_ =output[0, -1, -3:, -3:]
        a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
        assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def lowercase_ ( self) -> Optional[Any]:
        """The block must support a backward pass against an MSE loss."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.train()
        a_ =model(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ =output[0]
        a_ =torch.device(lowerCAmelCase_)
        a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
        a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
        loss.backward()
| 41
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase(unittest.TestCase):
    """Configuration holder shared by the image-processor tests below.

    Fixes: the original ``__init__`` declared every parameter under the same
    mangled name ``lowerCAmelCase_`` (a SyntaxError) and bound each value to
    a throwaway local ``a_`` instead of an attribute. Parameter names are
    restored from the attributes the sibling test class reads
    (``batch_size``, ``num_channels``, ``size``, ...), and the mutable list
    defaults are now created per call.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ) -> None:
        super().__init__()
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size matches the original hard-coded fallback.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def lowercase_(self) -> dict:
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase):
    """Image-processor test suite (ViTImageProcessor backend) over PIL, numpy
    and torch inputs.

    NOTE(review): several names here are unresolved in this file and look like
    automated-renaming artifacts — the mixin base ``lowerCamelCase__``, the
    fixture class ``EfficientFormerImageProcessorTester``, and the argument
    ``__lowerCamelCase`` (which Python additionally name-mangles inside a
    class body).  Confirm against the original transformers test module.
    """
    __magic_name__ : Any = ViTImageProcessor if is_vision_available() else None
    def lowercase_ ( self) -> str:
        """Create the shared fixture that parametrizes the tests below."""
        a_ =EfficientFormerImageProcessorTester(self)
    @property
    def lowercase_ ( self) -> str:
        """Kwargs dict used to build the processor under test."""
        return self.image_proc_tester.prepare_image_processor_dict()
    def lowercase_ ( self) -> Any:
        """The processor exposes the expected configuration attributes."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__lowerCamelCase , "image_mean"))
        self.assertTrue(hasattr(__lowerCamelCase , "image_std"))
        self.assertTrue(hasattr(__lowerCamelCase , "do_normalize"))
        self.assertTrue(hasattr(__lowerCamelCase , "do_resize"))
        self.assertTrue(hasattr(__lowerCamelCase , "size"))
    def lowercase_ ( self) -> List[Any]:
        """Intentionally empty placeholder test."""
        pass
    def lowercase_ ( self) -> List[str]:
        """Batched and unbatched PIL images yield correctly shaped pixel values."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        a_ =prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase)
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , Image.Image)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        a_ =image_processor(__lowerCamelCase , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
    def lowercase_ ( self) -> Optional[Any]:
        """Same checks as the PIL test but with numpy array inputs."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        a_ =prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase)
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , np.ndarray)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        a_ =image_processor(__lowerCamelCase , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
    def lowercase_ ( self) -> Any:
        """Same checks as the PIL test but with torch tensor inputs."""
        a_ =self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        a_ =prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase)
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , torch.Tensor)
        # Test not batched input
        a_ =image_processor(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        a_ =image_processor(__lowerCamelCase , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
| 701
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( distance , src ):
    """Print each vertex index and its shortest distance from vertex ``src``.

    The original ``def`` reused one parameter name (``lowercase__``) for both
    arguments — a SyntaxError in Python — while the body read ``src`` and
    iterated the distance list; the parameters are renamed to the names the
    body uses.  Argument order matches the existing caller:
    (distances, source-vertex).
    """
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )
def UpperCAmelCase_ ( graph , distance , edge_count ):
    """Return True if some edge can still be relaxed under ``distance``,
    i.e. the graph contains a reachable negative-weight cycle.

    The original ``def`` repeated a single parameter name three times (a
    SyntaxError) while the body read ``graph`` and ``distance``; parameters
    are renamed to the names the body uses.
    """
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            # An edge relaxable after V-1 rounds implies a negative cycle.
            return True
    return False
def UpperCAmelCase_ ( graph , vertex_count , edge_count , src ):
    """Bellman–Ford single-source shortest paths over an edge list.

    Each edge is a dict with keys ``src``/``dst``/``weight``.  Relaxes all
    edges ``vertex_count - 1`` times, then performs one extra pass to detect
    a negative cycle.

    Fixes in this revision: the original signature repeated one parameter
    name four times (a SyntaxError), and the cycle check called
    ``check_negative_cycle`` — a name that does not exist in this file
    because the helper was renamed — so the detection pass is inlined here.

    Raises:
        Exception: with message "Negative cycle found" when a negative
            cycle is reachable.

    Returns:
        List of shortest distances from ``src`` (``inf`` for unreachable).
    """
    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    # Inlined negative-cycle detection (see docstring).
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive driver for the Bellman-Ford implementation above.
    # NOTE(review): throughout this driver every result is bound to the single
    # name ``lowercase`` while later lines read ``E``, ``graph``, ``src``,
    # ``dest``, ``weight``, ``V`` and ``source`` — automated-renaming damage;
    # restore the distinct names from the upstream script before running.
    lowercase = int(input('''Enter number of vertices: ''').strip())
    lowercase = int(input('''Enter number of edges: ''').strip())
    lowercase = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        lowercase , lowercase , lowercase = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
    lowercase = int(input('''\nEnter shortest path source:''').strip())
    lowercase = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase = datasets.utils.logging.get_logger(__name__)
lowercase = ["names", "prefix"]
lowercase = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
lowercase = ["encoding_errors", "on_bad_lines"]
lowercase = ["date_format"]
@dataclass
class UpperCAmelCase ( datasets.BuilderConfig):
    """BuilderConfig mirroring the keyword arguments of ``pandas.read_csv``.

    NOTE(review): every field below is declared under the single mangled name
    ``__magic_name__``, so at class-creation time only the last annotation
    survives — the real field names (sep, delimiter, header, names, ...) must
    be restored from the upstream ``datasets`` CSV builder for this config to
    work.  The ``pd_read_csv_kwargs`` property likewise builds its dict into
    the throwaway name ``a_`` but then reads ``pd_read_csv_kwargs``, compares
    against the undefined name ``_lowercase``, and the parameter-list
    constants (``_PANDAS_READ_CSV_*``) and ``CsvConfig`` are also unresolved
    under those names in this file.
    """
    __magic_name__ : str = ","
    __magic_name__ : Optional[Any] = None
    __magic_name__ : int = "infer"
    __magic_name__ : Union[str, Any] = None
    __magic_name__ : str = None
    __magic_name__ : str = None
    __magic_name__ : List[str] = None
    __magic_name__ : Optional[int] = None
    __magic_name__ : Optional[Any] = True
    __magic_name__ : Optional[Any] = None
    __magic_name__ : str = None
    __magic_name__ : Optional[int] = None
    __magic_name__ : Union[str, Any] = None
    __magic_name__ : Any = False
    __magic_name__ : Optional[int] = None
    __magic_name__ : Union[str, Any] = None
    __magic_name__ : Union[str, Any] = None
    __magic_name__ : Union[str, Any] = True
    __magic_name__ : Union[str, Any] = True
    __magic_name__ : Dict = False
    __magic_name__ : Optional[int] = True
    __magic_name__ : int = None
    __magic_name__ : Tuple = "."
    __magic_name__ : int = None
    __magic_name__ : int = "\""
    __magic_name__ : List[str] = 0
    __magic_name__ : Optional[Any] = None
    __magic_name__ : Tuple = None
    __magic_name__ : str = None
    __magic_name__ : Optional[Any] = None
    __magic_name__ : Any = True
    __magic_name__ : str = True
    __magic_name__ : List[Any] = 0
    __magic_name__ : int = True
    __magic_name__ : Union[str, Any] = False
    __magic_name__ : List[Any] = None
    __magic_name__ : Dict = 10_000
    __magic_name__ : Optional[Any] = None
    __magic_name__ : Any = "strict"
    __magic_name__ : List[Any] = "error"
    __magic_name__ : List[str] = None
    def lowercase_ ( self) -> List[Any]:
        """Post-init hook: fold the ``delimiter``/``column_names`` aliases into
        ``sep``/``names`` (assignments were mangled to ``a_``)."""
        if self.delimiter is not None:
            a_ =self.delimiter
        if self.column_names is not None:
            a_ =self.column_names
    @property
    def lowercase_ ( self) -> Dict:
        """Assemble the kwargs dict forwarded to ``pandas.read_csv``."""
        a_ ={
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowercase):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class UpperCAmelCase ( datasets.ArrowBasedBuilder):
    """Arrow-based CSV dataset builder driven by the config class above.

    NOTE(review): references to ``CsvConfig`` (the config class was renamed
    to ``UpperCAmelCase``) and to the undefined names ``_lowercase``,
    ``dl_manager``, ``data_files``, ``files`` etc. are automated-renaming
    artifacts (locals were collapsed into ``a_``); compare with the upstream
    ``datasets/packaged_modules/csv/csv.py`` before running.
    """
    __magic_name__ : Union[str, Any] = CsvConfig
    def lowercase_ ( self) -> Optional[Any]:
        """Dataset metadata: just the (optional) feature schema."""
        return datasets.DatasetInfo(features=self.config.features)
    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Resolve ``config.data_files`` into one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        a_ =dl_manager.download_and_extract(self.config.data_files)
        if isinstance(_lowercase , (str, list, tuple)):
            a_ =data_files
            if isinstance(_lowercase , _lowercase):
                a_ =[files]
            a_ =[dl_manager.iter_files(_lowercase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        a_ =[]
        for split_name, files in data_files.items():
            if isinstance(_lowercase , _lowercase):
                a_ =[files]
            a_ =[dl_manager.iter_files(_lowercase) for file in files]
            splits.append(datasets.SplitGenerator(name=_lowercase , gen_kwargs={"files": files}))
        return splits
    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        """Cast an Arrow table to the configured feature schema."""
        if self.config.features is not None:
            a_ =self.config.features.arrow_schema
            if all(not require_storage_cast(_lowercase) for feature in self.config.features.values()):
                # cheaper cast
                a_ =pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowercase)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                a_ =table_cast(_lowercase , _lowercase)
        return pa_table
    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Yield ((file_idx, batch_idx), table) pairs read from CSV in chunks."""
        a_ =self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        a_ =(
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(_lowercase) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_lowercase)):
            a_ =pd.read_csv(_lowercase , iterator=_lowercase , dtype=_lowercase , **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(_lowercase):
                    a_ =pa.Table.from_pandas(_lowercase)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(_lowercase)
            except ValueError as e:
                logger.error(f"""Failed to read file \'{file}\' with error {type(_lowercase)}: {e}""")
                raise
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
# Path to a locally fine-tuned Stable Diffusion checkpoint (e.g. a DreamBooth run).
lowercase = '''path-to-your-trained-model'''
# NOTE(review): the next line reads ``model_id`` (undefined — the value above
# was bound to ``lowercase``) and ``torch.floataa`` (presumably a mangled
# ``torch.float16``); likewise ``prompt``, ``pipe`` and ``image`` below read
# names whose values were bound to ``lowercase``.  Restore distinct names
# before running; requires a CUDA device.
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
# 50 denoising steps with classifier-free guidance scale 7.5.
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowercase = random.Random()
def UpperCAmelCase_ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    The original signature repeated the name ``lowercase__`` for all four
    parameters (a SyntaxError); names are recovered from the body's reads.
    ``name`` is accepted for signature compatibility but unused.
    """
    if rng is None:
        # The module-level shared RNG was renamed by the mangling pass, so a
        # fresh generator is used when the caller does not supply one.
        rng = random.Random()
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
    """Parameter holder driving the Whisper feature-extraction tests.

    Fixes the mangled original, whose ``__init__`` repeated one parameter
    name eleven times (a SyntaxError) and assigned every value to the
    throwaway name ``a_``.  Parameter names and defaults are recovered from
    the attribute reads; positional order is unchanged.

    NOTE(review): both helper methods below keep their mangled name
    ``lowercase_``, so the second definition shadows the first at class
    creation, and the input builder still calls the helper ``floats_list``,
    which exists in this file only under a renamed identifier — restore the
    upstream names before using this class in earnest.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=4_0_0,
        max_seq_length=2_0_0_0,
        feature_size=1_0,
        hop_length=1_6_0,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_0_0_0,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths when equal_length is False.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def lowercase_(self):
        """Return the kwargs used to construct the feature extractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def lowercase_(self, equal_length=False, numpify=False):
        """Build a batch of float speech inputs (equal or increasing lengths)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( snake_case__ , unittest.TestCase):
    """WhisperFeatureExtractor test suite: save/load round-trips, feature
    extraction shapes and batching, dtype of padded outputs, an integration
    check against reference values, and zero-mean/unit-variance normalization.

    NOTE(review): the mixin base ``snake_case__``, the fixture class
    ``WhisperFeatureExtractionTester``, the helper ``floats_list`` and the
    argument name ``lowercase_`` used throughout the bodies (while results
    are assigned to ``a_``) are unresolved in this file — automated-renaming
    artifacts; compare with the upstream transformers test module.
    """
    __magic_name__ : Dict = WhisperFeatureExtractor if is_speech_available() else None
    def lowercase_ ( self) -> List[Any]:
        """Create the shared parameter fixture."""
        a_ =WhisperFeatureExtractionTester(self)
    def lowercase_ ( self) -> Any:
        """save_pretrained -> from_pretrained round-trip preserves config and mel filters."""
        a_ =self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            a_ =feat_extract_first.save_pretrained(lowercase_)[0]
            check_json_file_has_correct_format(lowercase_)
            a_ =self.feature_extraction_class.from_pretrained(lowercase_)
        a_ =feat_extract_first.to_dict()
        a_ =feat_extract_second.to_dict()
        a_ =feat_extract_first.mel_filters
        a_ =feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowercase_ , lowercase_))
        self.assertEqual(lowercase_ , lowercase_)
    def lowercase_ ( self) -> Dict:
        """to_json_file -> from_json_file round-trip preserves config and mel filters."""
        a_ =self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            a_ =os.path.join(lowercase_ , "feat_extract.json")
            feat_extract_first.to_json_file(lowercase_)
            a_ =self.feature_extraction_class.from_json_file(lowercase_)
        a_ =feat_extract_first.to_dict()
        a_ =feat_extract_second.to_dict()
        a_ =feat_extract_first.mel_filters
        a_ =feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowercase_ , lowercase_))
        self.assertEqual(lowercase_ , lowercase_)
    def lowercase_ ( self) -> Optional[Any]:
        """Feature extraction: output shape, batched/unbatched equivalence, truncation."""
        a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        a_ =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
        a_ =[np.asarray(lowercase_) for speech_input in speech_inputs]
        # Test feature size
        a_ =feature_extractor(lowercase_ , padding="max_length" , return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        a_ =feature_extractor(speech_inputs[0] , return_tensors="np").input_features
        a_ =feature_extractor(np_speech_inputs[0] , return_tensors="np").input_features
        self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
        # Test batched
        a_ =feature_extractor(lowercase_ , return_tensors="np").input_features
        a_ =feature_extractor(lowercase_ , return_tensors="np").input_features
        for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_):
            self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
        # Test 2-D numpy arrays are batched.
        a_ =[floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        a_ =np.asarray(lowercase_)
        a_ =feature_extractor(lowercase_ , return_tensors="np").input_features
        a_ =feature_extractor(lowercase_ , return_tensors="np").input_features
        for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_):
            self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
        # Test truncation required
        a_ =[floats_list((1, x))[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0)]
        a_ =[np.asarray(lowercase_) for speech_input in speech_inputs]
        a_ =[x[: feature_extractor.n_samples] for x in speech_inputs]
        a_ =[np.asarray(lowercase_) for speech_input in speech_inputs_truncated]
        a_ =feature_extractor(lowercase_ , return_tensors="np").input_features
        a_ =feature_extractor(lowercase_ , return_tensors="np").input_features
        for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_):
            self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
    def lowercase_ ( self) -> Optional[Any]:
        """pad() returns float32 in both numpy and torch tensor modes."""
        import torch
        a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        a_ =np.random.rand(1_0_0 , 3_2).astype(np.floataa)
        a_ =np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            a_ =feature_extractor.pad([{"input_features": inputs}] , return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.floataa)
            a_ =feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
    def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
        """Load the first N audio arrays from the dummy LibriSpeech dataset."""
        a_ =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
        # automatic decoding with librispeech
        a_ =ds.sort("id").select(range(lowercase_))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def lowercase_ ( self) -> Tuple:
        """Integration: log-mel features match precomputed reference values."""
        a_ =torch.tensor(
            [
                0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
                0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
                0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
                -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
            ])
        # fmt: on
        a_ =self._load_datasamples(1)
        a_ =WhisperFeatureExtractor()
        a_ =feature_extractor(lowercase_ , return_tensors="pt").input_features
        self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0))
        self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowercase_ , atol=1e-4))
    def lowercase_ ( self) -> Tuple:
        """zero_mean_unit_var_norm yields ~0 mean and ~1 variance."""
        a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        a_ =self._load_datasamples(1)[0]
        a_ =((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5  # Rescale to [0, 65535] to show issue
        a_ =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowercase_)[0]
        self.assertTrue(np.all(np.mean(lowercase_) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(lowercase_) - 1) < 1e-3))
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)
# Lazy-import scaffolding for the vision-encoder-decoder family: each backend
# (torch / TF / flax) contributes its model class only when it is installed.
# NOTE(review): the import-structure dict and each backend's model list are
# all bound to the single mangled name ``lowercase`` (each binding overwrites
# the previous), while the final _LazyModule call reads the undefined name
# ``_import_structure`` — restore the distinct upstream names before use.
lowercase = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = ['''VisionEncoderDecoderModel''']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = ['''TFVisionEncoderDecoderModel''']
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below defers them until first attribute access.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys
    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( words_path=None ):
    """Project Euler 42: count "triangle words" in a comma-separated word list.

    A word's value is the sum of its letters' alphabet positions (A=1); the
    word is a triangle word when that value is a triangular number n(n+1)/2.

    Fixes in this revision: the path was built from the undefined name
    ``lowercase__`` (restored to ``__file__``), the per-letter sum called
    ``ord`` on the wrong variable instead of the loop variable, the
    module-level triangular-number table had been renamed away (rebuilt
    locally), and the ``__main__`` guard called the undefined ``solution``.

    Args:
        words_path: optional path to the word list; defaults to ``words.txt``
            next to this module (backward compatible with the old no-arg call).

    Returns:
        The number of triangle words found.
    """
    if words_path is None:
        words_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "words.txt")
    with open(words_path) as f:
        words = f.readline()
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    # First 100 triangular numbers — the module-level table was mangled away.
    triangular_numbers = {n * (n + 1) // 2 for n in range(1, 101)}
    return len(
        [
            value
            for value in [sum(ord(x) - 6_4 for x in word) for word in words]
            if value in triangular_numbers
        ]
    )
if __name__ == "__main__":
    print(UpperCAmelCase_())
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( vocab_file , emoji_file ):
    """Load the GPT-NeoX-Japanese vocabulary and emoji table from disk.

    Fixes in this revision: the original signature used one parameter name
    for both files (a SyntaxError) and every dict insertion was collapsed
    into the throwaway name ``a_``, so the mappings were never populated;
    the insertions are restored from the upstream tokenizer.

    Args:
        vocab_file: text file with one comma-separated subword group per line.
        emoji_file: JSON file with the emoji mapping.

    Returns:
        (vocab, raw_vocab, ids_to_tokens, emoji) where ``vocab`` maps each
        subword to its id, ``raw_vocab`` maps the raw comma-joined line to
        its id, and ``ids_to_tokens`` maps an id to its subword list.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A line that is just "," (or has no comma) is one token; otherwise the
    # line is a comma-separated group of tokens sharing a single id.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
    """GPT-NeoX-Japanese tokenizer (mangled port).

    Repairs concrete defects from the automated renaming: ``__init__`` and
    ``save_vocabulary`` repeated a single parameter name (SyntaxErrors), and
    most locals were collapsed into ``a_`` while later lines read the real
    names.  Unresolvable references are left as-is and flagged:

    NOTE(review): the base ``__a`` (presumably ``PreTrainedTokenizer``), the
    module constants ``VOCAB_FILES_NAMES`` / ``PRETRAINED_VOCAB_FILES_MAP`` /
    ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` (bound to ``lowercase`` above),
    the helper ``load_vocab_and_emoji`` and the class
    ``SubWordJapaneseTokenizer`` are all undefined under these names in this
    file.  Every method below also kept the mangled name ``lowercase_``, so
    only the last definition survives on the class — restore the upstream
    method names before real use.
    """
    __magic_name__ : Optional[int] = VOCAB_FILES_NAMES
    __magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : str = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def lowercase_(self):
        """Vocabulary size (number of raw comma-joined entries)."""
        return len(self.raw_vocab)

    def lowercase_(self):
        """Return the raw vocab merged with any added tokens."""
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def lowercase_(self, text):
        """Tokenize via the subword tokenizer, optionally cleaning first."""
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def lowercase_(self, token):
        """Token string -> id (falls back to the unk token's id)."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def lowercase_(self, index):
        """Id -> token string."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def lowercase_(self, tokens):
        """Join tokens into a single stripped string."""
        out_string = "".join(tokens).strip()
        return out_string

    def lowercase_(self, conversation):
        """Flatten a Conversation into input ids, truncated to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def lowercase_(self, save_directory, filename_prefix=None):
        """Write the vocab and emoji files into ``save_directory``; return paths."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =vocab # same as swe
a_ =ids_to_tokens # same as bpe
a_ =emoji
a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ =re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
a_ =content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
        """Tokenize text into a list of vocabulary tokens.

        Whitespace is mapped to special tokens (<SP>, <BR>, <TAB>), emoji
        are replaced by their placeholder tokens, and characters with no
        vocabulary match fall back to symbol-class tokens or raw UTF-8 byte
        tokens.

        NOTE(review): the body reads names (``text``, ``clean``, ``x``,
        ``e``, ``wd``, ``pos``, ``end``) that do not match the mangled
        parameter names — presumably lost in an automated rename; confirm
        against the original tokenizer before relying on this code path.
        """
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace("　" , "<SP>")
        a_ =text.replace("\r\n" , "<BR>")
        a_ =text.replace("\n" , "<BR>")
        a_ =text.replace("\r" , "<BR>")
        a_ =text.replace("\t" , "<TAB>")
        a_ =text.replace("—" , "ー")
        a_ =text.replace("−" , "ー")
        # Substitute every known emoji with its placeholder token.
        for k, v in self.emoji["emoji"].items():
            if k in text:
                a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
        if clean:
            a_ =self.clean_text(lowerCAmelCase_)
        # True for 2-byte UTF-8 sequences falling in selected symbol ranges.
        def check_simbol(lowerCAmelCase_):
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
                a_ =(int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False
        # True for 3-byte UTF-8 sequences in the U+2000..U+2BFF area.
        def checkuae(lowerCAmelCase_):
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
                a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False
        a_ =0
        a_ =[]
        # Greedy segmentation: try the longest candidate substring first.
        while pos < len(lowerCAmelCase_):
            a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            a_ =[]  # (token_id, token, pos)
            for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
                a_ =text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(lowerCAmelCase_) > 2:
                        # A special token (<...>) wins outright.
                        a_ =[(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(lowerCAmelCase_) > 0:
                # the smallest token_id is adopted
                a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
                result.append(lowerCAmelCase_)
                a_ =e
            else:
                # No vocab match: classify the next char or emit raw bytes.
                a_ =pos + 1
                a_ =text[pos:end]
                if check_simbol(lowerCAmelCase_):
                    result.append("<KIGOU>")
                elif checkuae(lowerCAmelCase_):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                a_ =end
        return result
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
        """Convert token ids back into text.

        Consecutive ``<|byteN|>`` tokens are buffered and decoded as UTF-8;
        special tokens (<SP>, <BR>, <TAB>, <BLOCK>, <KIGOU>, <U2000U2BFF>,
        emoji placeholders) are mapped back to their surface forms.

        NOTE(review): the body reads ``index``/``word``/``byte_tokens``/
        ``words``/``text``, which do not match the mangled parameter names,
        and the per-index loop that presumably wrapped the decoding steps
        is not visible here — compare with the original implementation
        before use.
        """
        a_ =[]
        a_ =[]
        a_ =self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            # Flush any pending byte run before emitting a non-byte token.
            if len(lowerCAmelCase_) > 0:
                words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
                a_ =[]
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(lowerCAmelCase_)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(lowerCAmelCase_)
        # Flush a trailing byte run, if any.
        if len(lowerCAmelCase_) > 0:
            words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
        a_ ="".join(lowerCAmelCase_)
        return text
| 41
| 0
|
# Mapping from pip package name to its pinned/minimum version specifier
# string, one entry per optional or required dependency of the library.
lowercase = {
    'Pillow': 'Pillow<10.0.0',
    'accelerate': 'accelerate>=0.20.3',
    'av': 'av==9.2.0',
    'beautifulsoup4': 'beautifulsoup4',
    'black': 'black~=23.1',
    'codecarbon': 'codecarbon==1.2.0',
    'cookiecutter': 'cookiecutter==1.7.3',
    'dataclasses': 'dataclasses',
    'datasets': 'datasets!=2.5.0',
    'decord': 'decord==0.6.0',
    'deepspeed': 'deepspeed>=0.9.3',
    'diffusers': 'diffusers',
    'dill': 'dill<0.3.5',
    'evaluate': 'evaluate>=0.2.0',
    'fairscale': 'fairscale>0.3',
    'faiss-cpu': 'faiss-cpu',
    'fastapi': 'fastapi',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1,<=0.7.0',
    'ftfy': 'ftfy',
    'fugashi': 'fugashi>=1.0',
    'GitPython': 'GitPython<3.1.19',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
    'importlib_metadata': 'importlib_metadata',
    'ipadic': 'ipadic>=1.0.0,<2.0',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
    'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
    'jieba': 'jieba',
    'kenlm': 'kenlm',
    'keras-nlp': 'keras-nlp>=0.3.1',
    'librosa': 'librosa',
    'nltk': 'nltk',
    'natten': 'natten>=0.14.6',
    'numpy': 'numpy>=1.17',
    'onnxconverter-common': 'onnxconverter-common',
    'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
    'onnxruntime': 'onnxruntime>=1.4.0',
    'opencv-python': 'opencv-python',
    'optuna': 'optuna',
    'optax': 'optax>=0.0.8,<=0.1.4',
    'packaging': 'packaging>=20.0',
    'parameterized': 'parameterized',
    'phonemizer': 'phonemizer',
    'protobuf': 'protobuf',
    'psutil': 'psutil',
    'pyyaml': 'pyyaml>=5.1',
    'pydantic': 'pydantic<2',
    'pytest': 'pytest>=7.2.0',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'python': 'python>=3.8.0',
    'ray[tune]': 'ray[tune]',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
    'rjieba': 'rjieba',
    'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
    'ruff': 'ruff>=0.0.241,<=0.0.259',
    'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
    'sacremoses': 'sacremoses',
    'safetensors': 'safetensors>=0.3.1',
    'sagemaker': 'sagemaker>=2.31.0',
    'scikit-learn': 'scikit-learn',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'sigopt': 'sigopt',
    'starlette': 'starlette',
    'sudachipy': 'sudachipy>=0.6.6',
    'sudachidict_core': 'sudachidict_core>=20220729',
    'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
    'tensorflow': 'tensorflow>=2.6,<2.14',
    'tensorflow-text': 'tensorflow-text<2.14',
    'tf2onnx': 'tf2onnx',
    'timeout-decorator': 'timeout-decorator',
    'timm': 'timm',
    'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
    'torch': 'torch>=1.9,!=1.12.0',
    'torchaudio': 'torchaudio',
    'torchvision': 'torchvision',
    'pyctcdecode': 'pyctcdecode>=0.4.0',
    'tqdm': 'tqdm>=4.27',
    'unidic': 'unidic>=1.0.2',
    'unidic_lite': 'unidic_lite>=1.0.7',
    'urllib3': 'urllib3<2.0.0',
    'uvicorn': 'uvicorn',
}
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
# Map from the short variant suffix ("b0".."b7") to the corresponding
# Keras EfficientNet constructor.
# NOTE(review): all eight entries reference the same attribute
# ``efficientnet.EfficientNetBa`` — presumably EfficientNetB0..B7 before an
# automated rename; confirm against tf.keras.applications.efficientnet.
lowercase = {
    '''b0''': efficientnet.EfficientNetBa,
    '''b1''': efficientnet.EfficientNetBa,
    '''b2''': efficientnet.EfficientNetBa,
    '''b3''': efficientnet.EfficientNetBa,
    '''b4''': efficientnet.EfficientNetBa,
    '''b5''': efficientnet.EfficientNetBa,
    '''b6''': efficientnet.EfficientNetBa,
    '''b7''': efficientnet.EfficientNetBa,
}
# Per-variant hyper-parameters: classifier hidden width, compound-scaling
# width/depth coefficients, input resolution, dropout rate, and the block
# indices that need asymmetric depthwise-conv padding.
lowercase = {
    '''b0''': {
        '''hidden_dim''': 1_280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.0,
        '''image_size''': 224,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [],
    },
    '''b1''': {
        '''hidden_dim''': 1_280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.1,
        '''image_size''': 240,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [16],
    },
    '''b2''': {
        '''hidden_dim''': 1_408,
        '''width_coef''': 1.1,
        '''depth_coef''': 1.2,
        '''image_size''': 260,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 8, 16],
    },
    '''b3''': {
        '''hidden_dim''': 1_536,
        '''width_coef''': 1.2,
        '''depth_coef''': 1.4,
        '''image_size''': 300,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 18],
    },
    '''b4''': {
        '''hidden_dim''': 1_792,
        '''width_coef''': 1.4,
        '''depth_coef''': 1.8,
        '''image_size''': 380,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [6],
    },
    '''b5''': {
        '''hidden_dim''': 2_048,
        '''width_coef''': 1.6,
        '''depth_coef''': 2.2,
        '''image_size''': 456,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [13, 27],
    },
    '''b6''': {
        '''hidden_dim''': 2_304,
        '''width_coef''': 1.8,
        '''depth_coef''': 2.6,
        '''image_size''': 528,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [31],
    },
    '''b7''': {
        '''hidden_dim''': 2_560,
        '''width_coef''': 2.0,
        '''depth_coef''': 3.1,
        '''image_size''': 600,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [18],
    },
}
def UpperCAmelCase_ ( lowercase__ ):
    """Build the EfficientNetConfig for the requested variant.

    Populates the per-variant hyper-parameters from ``CONFIG_MAP`` and the
    ImageNet-1k id2label / label2id tables downloaded from the Hub.

    NOTE(review): several names here (``model_name``, ``idalabel``,
    ``config``) do not match the mangled parameter/local names, and
    ``int(lowercase__)`` in the dict comprehension presumably should be
    ``int(k)`` (JSON keys are strings) — confirm against the original
    conversion script.
    """
    a_ =EfficientNetConfig()
    a_ =CONFIG_MAP[model_name]["hidden_dim"]
    a_ =CONFIG_MAP[model_name]["width_coef"]
    a_ =CONFIG_MAP[model_name]["depth_coef"]
    a_ =CONFIG_MAP[model_name]["image_size"]
    a_ =CONFIG_MAP[model_name]["dropout_rate"]
    a_ =CONFIG_MAP[model_name]["dw_padding"]
    a_ ="huggingface/label-files"
    a_ ="imagenet-1k-id2label.json"
    a_ =1_0_0_0
    # Label map is fetched from the "huggingface/label-files" dataset repo.
    a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
    a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
    a_ =idalabel
    a_ ={v: k for k, v in idalabel.items()}
    return config
def UpperCAmelCase_ ( ):
    """Download the standard COCO validation image used to sanity-check
    model conversions.

    Returns:
        PIL.Image.Image: the decoded image.
    """
    a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
    # BUG FIX: the request must target the URL bound above and use
    # stream=True (``Response.raw`` is only readable for streamed
    # requests), and the opened image must be bound to the name that is
    # returned. Previously both arguments referenced the undefined name
    # ``lowercase__`` and ``im`` was never assigned, so calling this
    # function always raised NameError.
    im =Image.open(requests.get(a_ , stream=True ).raw )
    return im
def UpperCAmelCase_ ( lowercase__ ):
    """Build the EfficientNetImageProcessor matching the given variant.

    The processor resizes to the variant's square ``image_size`` and
    normalizes with the mean/std values below.

    NOTE(review): ``CONFIG_MAP[model_name]`` and ``do_center_crop=lowercase__``
    reference mangled names (the parameter is ``lowercase__``; the crop flag
    was presumably a literal boolean) — confirm against the original
    conversion script.
    """
    a_ =CONFIG_MAP[model_name]["image_size"]
    a_ =EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
    return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
    """Build the (TF variable name -> HF parameter name) mapping used to
    port EfficientNet weights to the HuggingFace model.

    NOTE(review): the tail of this function binds the empty dict and the
    classifier names to a mangled local instead of populating and keying
    ``key_mapping`` (which is returned but never initialized under that
    name), and the list comprehension reads ``original_param_names`` rather
    than the mangled parameter — confirm against the original script.
    """
    # TF variable names look like "block2a_..."; collect the unique block
    # prefixes and map them onto consecutive HF block indices.
    a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    a_ =sorted(set(lowercase__ ) )
    a_ =len(lowercase__ )
    a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
    a_ =[]
    # Stem (embedding) layer.
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
    # Per-block expansion / depthwise / squeeze-excite / projection layers.
    for b in block_names:
        a_ =block_name_mapping[b]
        rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
        rename_keys.append(
            (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
        rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
        rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
        rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
        rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
        rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
        rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
        rename_keys.append(
            (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
        rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
        rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
    # Final (top) convolution + batch norm.
    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    a_ ={}
    for item in rename_keys:
        if item[0] in original_param_names:
            a_ ="efficientnet." + item[1]
    a_ ="classifier.weight"
    a_ ="classifier.bias"
    return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
    """Copy TF weights into the HF model's state dict in place.

    Conv kernels are axis-permuted (3, 2, 0, 1) and depthwise kernels
    (2, 3, 0, 1) to match PyTorch's layout; dense kernels are transposed.

    NOTE(review): the body reads ``tf_params``/``key_mapping``/``hf_params``
    (presumably the three mangled parameters) and passes ``lowercase__`` to
    ``torch.from_numpy`` where the loop variable ``value`` appears to have
    been intended — confirm against the original conversion script.
    """
    for key, value in tf_params.items():
        # Skip variables whose name contains "normalization".
        if "normalization" in key:
            continue
        a_ =key_mapping[key]
        if "_conv" in key and "kernel" in key:
            a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            a_ =torch.from_numpy(np.transpose(lowercase__ ) )
        else:
            a_ =torch.from_numpy(lowercase__ )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    """End-to-end conversion driver.

    Instantiates the reference Keras EfficientNet with ImageNet weights,
    ports its parameters into the HF model, verifies both produce matching
    logits on the sample image, then optionally saves locally and/or pushes
    to the Hub.

    NOTE(review): parameter names were mangled — the body reads
    ``model_name`` / ``save_model`` / ``push_to_hub`` style names that no
    longer match the signature; confirm against the original script.
    """
    # Instantiate the reference Keras model with pretrained ImageNet weights.
    a_ =model_classes[model_name](
        include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
    a_ =original_model.trainable_variables
    a_ =original_model.non_trainable_variables
    a_ ={param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        a_ =param.numpy()
    a_ =list(tf_params.keys() )
    # Load HuggingFace model
    a_ =get_efficientnet_config(lowercase__ )
    a_ =EfficientNetForImageClassification(lowercase__ ).eval()
    a_ =hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    a_ =rename_keys(lowercase__ )
    replace_params(lowercase__ , lowercase__ , lowercase__ )
    # Initialize preprocessor and preprocess input image
    a_ =convert_image_processor(lowercase__ )
    a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        a_ =hf_model(**lowercase__ )
    a_ =outputs.logits.detach().numpy()
    # Original model inference
    a_ =False
    a_ =CONFIG_MAP[model_name]["image_size"]
    a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    a_ =image.img_to_array(lowercase__ )
    a_ =np.expand_dims(lowercase__ , axis=0 )
    a_ =original_model.predict(lowercase__ )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(lowercase__ ):
            os.mkdir(lowercase__ )
        # Save converted model and image processor
        hf_model.save_pretrained(lowercase__ )
        preprocessor.save_pretrained(lowercase__ )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        a_ =F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(lowercase__ )
        hf_model.push_to_hub(lowercase__ )
# CLI entry point: parse arguments and run the checkpoint conversion.
# NOTE(review): the parser is bound to a mangled name while ``parser`` /
# ``args`` / ``convert_efficientnet_checkpoint`` are referenced below —
# artifacts of an automated rename; confirm against the original script.
if __name__ == "__main__":
    lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''b0''',
        type=str,
        help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''hf_model''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    lowercase = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase :
    '''Test harness that builds a small random LayoutLMv3 config plus
    synthetic text/box/image inputs (and optional labels) for the TF
    model tests below.'''
    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=2 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=9_9 , lowerCAmelCase_=3_6 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=3_7 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=1_6 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=6 , lowerCAmelCase_=6 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , lowerCAmelCase_=1_0_0_0 , ) -> Tuple:
        """Store every hyper-parameter and derive the combined sequence
        length (text tokens + image patches + 1 CLS token)."""
        a_ =parent
        a_ =batch_size
        a_ =num_channels
        a_ =image_size
        a_ =patch_size
        a_ =is_training
        a_ =use_input_mask
        a_ =use_token_type_ids
        a_ =use_labels
        a_ =vocab_size
        a_ =hidden_size
        a_ =num_hidden_layers
        a_ =num_attention_heads
        a_ =intermediate_size
        a_ =hidden_act
        a_ =hidden_dropout_prob
        a_ =attention_probs_dropout_prob
        a_ =max_position_embeddings
        a_ =type_vocab_size
        a_ =type_sequence_label_size
        a_ =initializer_range
        a_ =coordinate_size
        a_ =shape_size
        a_ =num_labels
        a_ =num_choices
        a_ =scope
        a_ =range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        a_ =text_seq_length
        a_ =(image_size // patch_size) ** 2 + 1
        a_ =self.text_seq_length + self.image_seq_length
    def lowercase_ ( self) -> Tuple:
        """Build a random config and random input tensors; bbox corners are
        swapped where needed so x1 <= x2 and y1 <= y2."""
        a_ =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
        a_ =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
        a_ =bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    a_ =bbox[i, j, 3]
                    a_ =bbox[i, j, 1]
                    a_ =tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    a_ =bbox[i, j, 2]
                    a_ =bbox[i, j, 0]
                    a_ =tmp_coordinate
        a_ =tf.constant(__UpperCamelCase)
        a_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a_ =None
        if self.use_input_mask:
            a_ =random_attention_mask([self.batch_size, self.text_seq_length])
        a_ =None
        if self.use_token_type_ids:
            a_ =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
        a_ =None
        a_ =None
        if self.use_labels:
            a_ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a_ =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
        a_ =LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
        """Run the base model with text+image, text-only, and image-only
        inputs and check the output hidden-state shapes."""
        a_ =TFLayoutLMvaModel(config=__UpperCamelCase)
        # text + image
        a_ =model(__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase)
        a_ =model(
            __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , training=__UpperCamelCase , )
        a_ =model(__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        a_ =model(__UpperCamelCase , training=__UpperCamelCase)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        a_ =model({"pixel_values": pixel_values} , training=__UpperCamelCase)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
        """Check the sequence-classification head's logits shape."""
        a_ =self.num_labels
        a_ =TFLayoutLMvaForSequenceClassification(config=__UpperCamelCase)
        a_ =model(
            __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Check the token-classification head's logits shape."""
        a_ =self.num_labels
        a_ =TFLayoutLMvaForTokenClassification(config=__UpperCamelCase)
        a_ =model(
            __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Check the QA head's start/end logits shapes."""
        a_ =2
        a_ =TFLayoutLMvaForQuestionAnswering(config=__UpperCamelCase)
        a_ =model(
            __UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , training=__UpperCamelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def lowercase_ ( self) -> Union[str, Any]:
        """Package config + inputs into the dict shape shared by the common
        model tests."""
        a_ =self.prepare_config_and_inputs()
        ((a_) , (a_) , (a_) , (a_) , (a_) , (a_) , (a_) , (a_)) =config_and_inputs
        a_ ={
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( __a , __a , unittest.TestCase):
    '''Common-suite model tests for the TF LayoutLMv3 variants (base model
    plus sequence/token-classification and question-answering heads).'''
    # All TF LayoutLMv3 model classes exercised by the common tests.
    __magic_name__ : Optional[Any] = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task mapping used by the pipeline mixin.
    __magic_name__ : List[Any] = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __magic_name__ : Dict = False
    __magic_name__ : int = False
    __magic_name__ : List[Any] = False
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
        """Always run the generated pipeline test combinations."""
        return True
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False) -> dict:
        """Adapt the shared inputs dict to a specific model class: tile the
        inputs for multiple-choice heads and add dummy labels on request."""
        a_ =copy.deepcopy(__UpperCamelCase)
        if model_class in get_values(__UpperCamelCase):
            a_ ={
                k: tf.tile(tf.expand_dims(__UpperCamelCase , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(__UpperCamelCase , tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(__UpperCamelCase):
                a_ =tf.ones(self.model_tester.batch_size , dtype=tf.intaa)
            elif model_class in get_values(__UpperCamelCase):
                a_ =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
                a_ =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
            elif model_class in get_values(__UpperCamelCase):
                a_ =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
            elif model_class in get_values(__UpperCamelCase):
                a_ =tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa)
        return inputs_dict
    def lowercase_ ( self) -> List[str]:
        """Instantiate the model tester and the config tester."""
        a_ =TFLayoutLMvaModelTester(self)
        a_ =ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7)
    def lowercase_ ( self) -> Optional[int]:
        """Run the generic configuration round-trip tests."""
        self.config_tester.run_common_tests()
    def lowercase_ ( self) -> List[Any]:
        """Verify loss computation via kwargs, masked labels, dict input and
        positional-tuple input for every model class with a loss head."""
        a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ =model_class(__UpperCamelCase)
            if getattr(__UpperCamelCase , "hf_compute_loss" , __UpperCamelCase):
                # The number of elements in the loss should be the same as the number of elements in the label
                a_ =self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase)
                a_ =prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCamelCase)[0]
                ]
                a_ =added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                a_ =self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase)
                a_ =prepared_for_class.pop("input_ids")
                a_ =model(__UpperCamelCase , **__UpperCamelCase)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                a_ =self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase)
                a_ =prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    a_ =prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        a_ =-1_0_0
                        a_ =tf.convert_to_tensor(__UpperCamelCase)
                        a_ =model(__UpperCamelCase , **__UpperCamelCase)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                a_ =self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase)
                a_ =model(__UpperCamelCase)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                a_ =self._prepare_for_class(inputs_dict.copy() , __UpperCamelCase , return_labels=__UpperCamelCase)
                # Get keys that were added with the _prepare_for_class function
                a_ =prepared_for_class.keys() - inputs_dict.keys()
                a_ =inspect.signature(model.call).parameters
                a_ =list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                a_ ={0: "input_ids"}
                for label_key in label_keys:
                    a_ =signature_names.index(__UpperCamelCase)
                    a_ =label_key
                a_ =sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                a_ =[]
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    a_ =prepared_for_class[value]
                a_ =tuple(__UpperCamelCase)
                # Send to model
                a_ =model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def lowercase_ ( self) -> Optional[int]:
        """Run the base-model shape checks."""
        (
            (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) ,
        ) =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
    def lowercase_ ( self) -> Union[str, Any]:
        """Run the base-model checks for every position-embedding type."""
        (
            (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) ,
        ) =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            a_ =type
            self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
    def lowercase_ ( self) -> int:
        """Run the sequence-classification head checks."""
        (
            (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) ,
        ) =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
    def lowercase_ ( self) -> Union[str, Any]:
        """Run the token-classification head checks."""
        (
            (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) ,
        ) =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
    def lowercase_ ( self) -> List[Any]:
        """Run the question-answering head checks."""
        (
            (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) ,
        ) =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
    @slow
    def lowercase_ ( self) -> Union[str, Any]:
        """Smoke-test loading the first pretrained checkpoint from the Hub."""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a_ =TFLayoutLMvaModel.from_pretrained(__UpperCamelCase)
            self.assertIsNotNone(__UpperCamelCase)
def UpperCAmelCase_ ( ):
    """Load the COCO sample image stored in the test fixtures folder.

    Returns:
        PIL.Image.Image: the opened fixture image.
    """
    # BUG FIX: the opened image was previously bound to a mangled local
    # while the undefined name ``image`` was returned, so every call raised
    # NameError. Bind the result to the name that is returned.
    image =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class UpperCAmelCase ( unittest.TestCase):
    '''Slow integration test: run the pretrained microsoft/layoutlmv3-base
    checkpoint on the bundled COCO fixture image and compare the first
    hidden-state entries against precomputed reference values.'''
    @cached_property
    def lowercase_ ( self) -> List[Any]:
        """Default LayoutLMv3 image processor, or None when vision
        dependencies are unavailable."""
        return LayoutLMvaImageProcessor(apply_ocr=__UpperCamelCase) if is_vision_available() else None
    @slow
    def lowercase_ ( self) -> str:
        """Forward the fixture image plus a tiny hand-made token/box batch
        and verify output shape and leading hidden-state values."""
        a_ =TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        a_ =self.default_image_processor
        a_ =prepare_img()
        a_ =image_processor(images=__UpperCamelCase , return_tensors="tf").pixel_values
        a_ =tf.constant([[1, 2]])
        a_ =tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) , axis=0)
        # forward pass
        a_ =model(input_ids=__UpperCamelCase , bbox=__UpperCamelCase , pixel_values=__UpperCamelCase , training=__UpperCamelCase)
        # verify the logits
        a_ =(1, 1_9_9, 7_6_8)
        self.assertEqual(outputs.last_hidden_state.shape , __UpperCamelCase)
        a_ =tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1e-4))
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase = TypeVar('''T''')
# NOTE(review): the aliases below reference ``T`` although the TypeVar was
# bound to a mangled name — presumably an automated rename; confirm.
# A homogeneous list or tuple of T.
lowercase = Union[List[T], Tuple[T, ...]]
# A single T, a list of T, or a str-keyed mapping of T.
lowercase = Union[T, List[T], Dict[str, T]]
# Anything usable as a filesystem path.
lowercase = Union[str, bytes, os.PathLike]
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Submodule -> public names map consumed by _LazyModule so heavy imports
# only happen on first attribute access.
lowercase = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes as well.
    # NOTE(review): this list is bound to a mangled name instead of being
    # inserted into the import-structure dict — confirm against the stock
    # lazy-__init__ template.
    lowercase = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static imports for type checkers only (never executed at runtime).
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys
    # Replace this module object with a lazy proxy at runtime.
    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowercase = datasets.logging.get_logger(__name__)
lowercase = '''\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'''
lowercase = '''\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'''
lowercase = '''\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'''
lowercase = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase ( datasets.Metric):
    """BLEURT: score candidate sentences against references with a learnt BERT-based model."""

    def _info(self):
        """Declare metric metadata and the expected string-sequence input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        """Resolve the configured checkpoint name, download/extract it, and build the scorer.

        Note: this must be an if/elif chain — with independent `if`s the "default"
        config would fall through to the KeyError below.
        """
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""")

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        """Score each prediction against its reference; returns {"scores": [...]}."""
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 708
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator():
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely.

    Restored name: the call site below uses `fibonacci_generator`, but the
    obfuscated file had renamed both functions to the same identifier.
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def UpperCAmelCase_(lowercase__ = 1_0_0_0):
    """Return the 1-based index (F1 = F2 = 1 convention) of the first Fibonacci
    term containing `lowercase__` digits (Project Euler 25)."""
    answer = 1
    gen = fibonacci_generator()
    # Count terms until one reaches the requested digit length; the generator
    # starts at F2 = 1, hence the +1 correction on return.
    while len(str(next(gen))) < lowercase__:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(UpperCAmelCase_(int(str(input()).strip())))
| 41
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase):
    """Fast, CPU-sized unit tests for ``StableDiffusionLDMaDPipeline`` built from tiny dummy models.

    NOTE(review): this class shows mechanical renaming damage and cannot run as-is:
    every helper/test method is named ``lowercase_`` (later defs shadow earlier ones,
    so unittest discovers nothing), all four class attributes share ``__magic_name__``,
    every local is bound to ``a_`` while later lines reference the original names
    (``unet``, ``components``, ``output`` ...), and many call sites reference the
    undefined name ``lowerCamelCase_`` (presumably literals such as seeds/flags —
    TODO confirm against the upstream diffusers test file before restoring).
    """

    # Only the last of these four assignments survives (all share one name).
    __magic_name__ : List[Any] = StableDiffusionLDMaDPipeline
    __magic_name__ : Any = TEXT_TO_IMAGE_PARAMS
    __magic_name__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
    __magic_name__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS

    def lowercase_ ( self) -> str:
        """Build the dict of tiny dummy components (UNet, DDIM scheduler, 6-channel VAE, CLIP text encoder, tokenizer)."""
        torch.manual_seed(0)
        a_ =UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
        a_ =DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        torch.manual_seed(0)
        # 6 channels in/out: RGB + depth are decoded jointly by the LDM3D VAE.
        a_ =AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0)
        a_ =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        a_ =CLIPTextModel(lowerCamelCase_)
        a_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        a_ ={
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0) -> Dict:
        """Build deterministic call kwargs; MPS needs a CPU-seeded generator.

        NOTE(review): duplicate parameter names — originally (device, seed).
        """
        if str(lowerCamelCase_).startswith("mps"):
            a_ =torch.manual_seed(lowerCamelCase_)
        else:
            a_ =torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
        a_ ={
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def lowercase_ ( self) -> int:
        """Smoke-test a 2-step run on CPU and compare RGB/depth corner slices to reference values."""
        a_ ="cpu"  # ensure determinism for the device-dependent torch.Generator
        a_ =self.get_dummy_components()
        a_ =StableDiffusionLDMaDPipeline(**lowerCamelCase_)
        a_ =ldmad_pipe.to(lowerCamelCase_)
        ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        a_ =self.get_dummy_inputs(lowerCamelCase_)
        a_ =ldmad_pipe(**lowerCamelCase_)
        a_ , a_ =output.rgb, output.depth
        a_ =rgb[0, -3:, -3:, -1]
        a_ =depth[0, -3:, -1]
        assert rgb.shape == (1, 6_4, 6_4, 3)
        assert depth.shape == (1, 6_4, 6_4)
        a_ =np.array(
            [0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2])
        a_ =np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def lowercase_ ( self) -> Union[str, Any]:
        """Check that passing pre-computed prompt embeddings matches passing the raw prompt."""
        a_ =self.get_dummy_components()
        a_ =StableDiffusionLDMaDPipeline(**lowerCamelCase_)
        a_ =ldmad_pipe.to(lowerCamelCase_)
        ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        a_ =self.get_dummy_inputs(lowerCamelCase_)
        a_ =3 * [inputs["prompt"]]
        # forward
        a_ =ldmad_pipe(**lowerCamelCase_)
        a_ , a_ =output.rgb, output.depth
        a_ =rgb_slice_a[0, -3:, -3:, -1]
        a_ =depth_slice_a[0, -3:, -1]
        a_ =self.get_dummy_inputs(lowerCamelCase_)
        a_ =3 * [inputs.pop("prompt")]
        a_ =ldmad_pipe.tokenizer(
            lowerCamelCase_ , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="pt" , )
        a_ =text_inputs["input_ids"].to(lowerCamelCase_)
        a_ =ldmad_pipe.text_encoder(lowerCamelCase_)[0]
        a_ =prompt_embeds
        # forward
        a_ =ldmad_pipe(**lowerCamelCase_)
        a_ , a_ =output.rgb, output.depth
        a_ =rgb_slice_a[0, -3:, -3:, -1]
        a_ =depth_slice_a[0, -3:, -1]
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten()).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten()).max() < 1e-4

    def lowercase_ ( self) -> Optional[Any]:
        """Run with a negative prompt (PNDM scheduler) and compare corner slices to reference values."""
        a_ ="cpu"  # ensure determinism for the device-dependent torch.Generator
        a_ =self.get_dummy_components()
        a_ =PNDMScheduler(skip_prk_steps=lowerCamelCase_)
        a_ =StableDiffusionLDMaDPipeline(**lowerCamelCase_)
        a_ =ldmad_pipe.to(lowerCamelCase_)
        ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        a_ =self.get_dummy_inputs(lowerCamelCase_)
        a_ ="french fries"
        a_ =ldmad_pipe(**lowerCamelCase_ , negative_prompt=lowerCamelCase_)
        a_ , a_ =output.rgb, output.depth
        a_ =rgb[0, -3:, -3:, -1]
        a_ =depth[0, -3:, -1]
        assert rgb.shape == (1, 6_4, 6_4, 3)
        assert depth.shape == (1, 6_4, 6_4)
        a_ =np.array(
            [0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7])
        a_ =np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
    """Slow GPU integration test for the full ``Intel/ldm3d`` checkpoint (3 inference steps).

    NOTE(review): renaming damage as elsewhere in this file — both methods share the
    name ``lowercase_`` (the second shadows the first), the inputs helper has four
    duplicate parameter names (a SyntaxError; originally device/dtype/seed), locals are
    bound to ``a_`` while later lines reference the original names, and ``lowerCamelCase_``
    is undefined.  Must be restored before this suite can run.
    """

    def lowercase_ ( self) -> Union[str, Any]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.floataa , lowerCAmelCase_=0) -> int:
        """Build deterministic pipeline inputs with pre-made latents (fixed RandomState)."""
        a_ =torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
        a_ =np.random.RandomState(lowerCamelCase_).standard_normal((1, 4, 6_4, 6_4))
        a_ =torch.from_numpy(lowerCamelCase_).to(device=lowerCamelCase_ , dtype=lowerCamelCase_)
        a_ ={
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def lowercase_ ( self) -> List[Any]:
        """Run Intel/ldm3d for 3 steps and compare RGB/depth corner slices against reference values."""
        a_ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        a_ =ldmad_pipe.to(lowerCamelCase_)
        ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        a_ =self.get_inputs(lowerCamelCase_)
        a_ =ldmad_pipe(**lowerCamelCase_)
        a_ , a_ =output.rgb, output.depth
        a_ =rgb[0, -3:, -3:, -1].flatten()
        a_ =rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
        assert depth.shape == (1, 5_1_2, 5_1_2)
        a_ =np.array(
            [0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6])
        a_ =np.array(
            [0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
    """Nightly GPU tests: 50-step runs of ``Intel/ldm3d`` and ``Intel/ldm3d-4c``,
    checked via mean/std statistics of the RGB and depth outputs.

    NOTE(review): same renaming damage as the classes above — all four methods are
    named ``lowercase_`` (only the last survives), the inputs helper has duplicate
    parameter names (a SyntaxError), locals are bound to ``a_`` while later lines
    reference the original names, and ``lowerCamelCase_`` is undefined.
    """

    def lowercase_ ( self) -> int:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.floataa , lowerCAmelCase_=0) -> Dict:
        """Build deterministic pipeline inputs with pre-made latents (50 inference steps)."""
        a_ =torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
        a_ =np.random.RandomState(lowerCamelCase_).standard_normal((1, 4, 6_4, 6_4))
        a_ =torch.from_numpy(lowerCamelCase_).to(device=lowerCamelCase_ , dtype=lowerCamelCase_)
        a_ ={
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 5_0,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def lowercase_ ( self) -> Dict:
        """Run Intel/ldm3d for 50 steps and compare RGB/depth mean and std to reference statistics."""
        a_ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(lowerCamelCase_)
        ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        a_ =self.get_inputs(lowerCamelCase_)
        a_ =ldmad_pipe(**lowerCamelCase_)
        a_ , a_ =output.rgb, output.depth
        a_ =0.4_9_5_5_8_6
        a_ =0.3_3_7_9_5_5_1_5
        a_ =1_1_2.4_8_5_1_8
        a_ =9_8.4_8_9_7_4_6
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def lowercase_ ( self) -> Optional[int]:
        """Run the 4-channel Intel/ldm3d-4c variant and check output shapes plus mean/std statistics."""
        a_ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(lowerCamelCase_)
        ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        a_ =self.get_inputs(lowerCamelCase_)
        a_ =ldmad_pipe(**lowerCamelCase_)
        a_ , a_ =output.rgb, output.depth
        a_ =0.4_1_9_4_1_2_7
        a_ =0.3_5_3_7_5_5_8_6
        a_ =0.5_6_3_8_5_0_2
        a_ =0.3_4_6_8_6_1_0_3
        # The 4c variant emits depth with a trailing channel dimension.
        assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
        assert depth.shape == (1, 5_1_2, 5_1_2, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase ( __a):
    """Configuration class for Switch Transformers (a sparse Mixture-of-Experts T5 variant).

    NOTE(review): the obfuscated original gave every constructor parameter the same
    name (a SyntaxError) and collapsed the three class attributes to one name; the
    names below are restored from the attribute assignments in the body, whose
    defaults match the upstream ``SwitchTransformersConfig``.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=3_2_1_2_8,
        d_model=7_6_8,
        d_kv=6_4,
        d_ff=2_0_4_8,
        expert_capacity=6_4,
        num_layers=1_2,
        num_sparse_encoder_layers=3,
        num_decoder_layers=1_2,
        num_sparse_decoder_layers=3,
        num_heads=1_2,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.0_1,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=3_2,
        relative_attention_max_distance=1_2_8,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.0_0_1,
        router_aux_loss_coef=0.0_0_1,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # "gated-gelu" style strings split into (gate flag, activation name).
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 41
| 0
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number):
    """Primality test via trial division over 6k +/- 1 candidates.

    Restored name: the generator below calls `is_prime`, but the obfuscated
    file had renamed all three functions to the same identifier.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def UpperCAmelCase_(lowercase__ = 2_0_0_0_0_0_0):
    """Return the sum of all primes below `lowercase__` (Project Euler 10)."""
    # The obfuscated lambda read an undefined `x`; bind the takewhile argument.
    return sum(takewhile(lambda x: x < lowercase__, prime_generator()))


if __name__ == "__main__":
    print(f"""{UpperCAmelCase_() = }""")
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
# Configure verbose logging for this test module.  The obfuscated file bound
# the root logger to `lowercase` while later statements refer to `logger` —
# restore both names so the handler wiring below can work.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
lowercase = logger
def UpperCAmelCase_(lowercase__):
    """Load ``all_results.json`` from directory `lowercase__` and return it as a dict.

    Raises:
        ValueError: when the file does not exist in that directory.
    """
    results = {}
    path = os.path.join(lowercase__, "all_results.json")
    if os.path.exists(path):
        # Parse from the open file handle (the obfuscated version passed the
        # directory string to json.load and returned undefined names).
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
# Mirror log records to stdout so they show up in captured test output.  The
# obfuscated file bound the handler to `lowercase` but attached `stream_handler`
# (undefined); attach directly to the root logger to stay self-contained.
stream_handler = logging.StreamHandler(sys.stdout)
logging.getLogger().addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
    """TPU smoke tests: launch example scripts through ``xla_spawn`` and check results and runtime.

    NOTE(review): renaming damage — both test methods share the name ``lowercase_``
    (the second shadows the first, and neither starts with ``test_`` so unittest
    discovers nothing), locals are bound to ``a_`` while later lines reference the
    original names (``tmp_dir``, ``result``, ``start``/``end``), and
    ``patch.object(lowerCAmelCase_, "argv", lowerCAmelCase_)`` references an undefined
    name — presumably this patched ``sys.argv`` with the built argument list; confirm
    against the upstream transformers test before restoring.
    """

    def lowercase_ ( self) -> List[Any]:
        """Run run_glue.py on 8 TPU cores; check eval accuracy and that it finishes in < 500 s."""
        import xla_spawn

        a_ =self.get_auto_remove_tmp_dir()
        a_ =f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
            a_ =time()
            xla_spawn.main()
            a_ =time()
        a_ =get_results(lowerCAmelCase_)
        self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
        # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
        self.assertLess(end - start , 5_0_0)

    def lowercase_ ( self) -> Tuple:
        """Launch the trainer TPU self-test via xla_spawn."""
        import xla_spawn

        a_ ="\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    ".split()
        with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
            xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
import re
import subprocess
import sys
# Find the commit where the current branch diverged from main.  (The obfuscated
# version bound every intermediate to `lowercase` while the following lines
# referenced the original names — restored.)
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")

# Files changed since the fork point; --diff-filter=d excludes deleted files.
modified_files = (
    subprocess.check_output(f"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("utf-8").split()
)

# Directories of interest are passed as command-line arguments; keep only the
# modified .py files that live under one of them.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"""^({joined_dirs}).*?\.py$""")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
    """ALBERT model configuration.

    NOTE(review): the obfuscated original gave every constructor parameter the
    same name (a SyntaxError); the names below are restored from the attribute
    assignments in the body, whose defaults match the upstream ``AlbertConfig``.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=3_0_0_0_0,
        embedding_size=1_2_8,
        hidden_size=4_0_9_6,
        num_hidden_layers=1_2,
        num_hidden_groups=1,
        num_attention_heads=6_4,
        intermediate_size=1_6_3_8_4,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class UpperCAmelCase ( __a):
    """ONNX export configuration for ALBERT models."""

    @property
    def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
        """Map each model input to its dynamic ONNX axes.

        Multiple-choice tasks carry an extra `choice` axis between batch and
        sequence.  (The obfuscated original bound the axis dict to `a_` but
        referenced `dynamic_axis` — restored.)
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 41
| 0
|
'''simple docstring'''
import numpy as np
def UpperCAmelCase_(vector, alpha):
    """Exponential Linear Unit activation.

    Returns `vector` where it is positive, else ``alpha * (exp(vector) - 1)``.
    (The obfuscated original had duplicate parameter names — a SyntaxError —
    and referenced an undefined `a_`; restored from the standard ELU formula.)
    """
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a_ =nums[0]
for i in range(1 , len(lowercase__ ) ):
a_ =nums[i]
a_ =max(lowercase__ , ans + num , lowercase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 0
|
'''simple docstring'''
def temp_input_value(min_val = 1_0, max_val = 1_0_0_0, option = True):
    """Return `min_val` when `option` is True, else `max_val`.

    (Function and parameter names restored — the obfuscated file collapsed all
    function names to one identifier and referenced undefined names in the
    isinstance checks.)
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1, number_2):
    """Integer midpoint of the two numbers (truncated toward zero)."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower, higher, to_guess):
    """Binary-search for `to_guess` inside the open interval (lower, higher),
    printing each probe and the final guess.

    Raises:
        ValueError: if the bounds are inverted or `to_guess` is out of range.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        # Oracle: compare a probe against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""")
    print(f"""details : {last_numbers!s}""")


def main():
    """Interactive entry point: read bounds and target, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 713
|
'''simple docstring'''
import os
from math import logaa
def UpperCAmelCase_(lowercase__ = "base_exp.txt"):
    """Project Euler 99: return the 1-based line number of the `base,exponent`
    pair with the greatest value base**exponent, compared via exponent * log10(base).

    `lowercase__` is resolved relative to this file's directory; an absolute
    path is used as-is (os.path.join discards the directory for absolute paths).
    """
    # Local import: the module-level `from math import logaa` cannot work
    # (the math module has no `logaa`; this was `log10` before obfuscation).
    from math import log10

    largest = 0.0
    result = 0
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), lowercase__)) as f:
        for i, line in enumerate(f):
            # Each line holds "base,exponent"; the obfuscated version mapped
            # the filename over the fields instead of int().
            a, x = map(int, line.split(","))
            if x * log10(a) > largest:
                largest = x * log10(a)
                result = i + 1
    return result


if __name__ == "__main__":
    print(UpperCAmelCase_())
| 41
| 0
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict.

    (Name and parameter names restored: the obfuscated definition had three
    duplicate parameter names — a SyntaxError — and the __main__ block below
    calls this function by its original name.)

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        rembert_config_file: JSON config describing the architecture.
        pytorch_dump_path: where to save the converted PyTorch weights.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    lowercase = argparse.ArgumentParser()
    # Required parameters
    parser = lowercase
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 714
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a, b):
    """Return (x, y) such that a*x + b*y == gcd(a, b).

    (Names restored: the obfuscated file renamed all four functions to one
    identifier while the recursive/internal calls used the original names.)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1, r_1, n_2, r_2):
    """Smallest non-negative n with n % n_1 == r_1 and n % n_2 == r_2 (n_1, n_2 coprime)."""
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a, n):
    """Multiplicative inverse of a modulo n (n coprime to a)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1, r_1, n_2, r_2):
    """Same result as chinese_remainder_theorem, computed via modular inverses."""
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


# Backward-compatible alias: the last surviving binding of the obfuscated name.
UpperCAmelCase_ = chinese_remainder_theorem2


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
| 41
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class UpperCAmelCase ( __a):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor.

    NOTE(review): restored from obfuscation damage — the original collapsed the two
    class attributes to one name, gave `__init__` duplicate parameter names (a
    SyntaxError), used an undefined name as the `kwargs.pop` default (should be
    ``None``), and renamed pad/batch_decode/decode to a single colliding identifier.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process audio and/or text inputs, plus optional audio/text targets.

        Exactly one of `audio`/`text` may be given as the model input, and at
        most one of `audio_target`/`text_target` as the label source.  Target
        features are attached to the inputs as `labels` (with
        `decoder_attention_mask` when the target carries an attention mask).
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad pre-processed inputs (`input_values` or `input_ids`) and/or `labels` to a batch.

        Spectrogram labels are padded through the feature extractor with
        `feature_size` temporarily swapped to `num_mel_bins`, then restored.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            # Token labels (dicts with "input_ids") go through the tokenizer;
            # anything else is treated as spectrogram features.
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_(lowercase__):
    """Return True if the given matrix is Hermitian (equal to its conjugate transpose)."""
    # Fix: the body previously referenced an undefined name `matrix`.
    return np.array_equal(lowercase__, lowercase__.conjugate().T)
def UpperCAmelCase_(matrix, v):
    """Return the Rayleigh quotient (v* A v) / (v* v) of matrix A and column vector v.

    Fix: the original signature declared two parameters with the same name
    (a SyntaxError) and the body referenced an undefined `v`.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(matrix)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def UpperCAmelCase_():
    """Numeric smoke tests for the Hermitian check and Rayleigh quotient above.

    Fix: local arrays were clobbered into a single name and the calls used an
    undefined `lowercase__`; restored distinct operands.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    # For this real symmetric matrix and v = (1, 2, 3)^T the quotient is exactly 3.
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    # Run the module's doctests, then the numeric smoke tests.
    import doctest
    doctest.testmod()
    # NOTE(review): `tests` is not defined under that name in this module
    # (the smoke-test function above is obfuscated as `UpperCAmelCase_`) — confirm.
    tests()
| 41
| 0
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase_(config_name, save_dir, **config_kwargs):
    """Create a randomly initialized seq2seq model from `config_name` and save it.

    Fix: the original signature reused `lowercase__` for every parameter
    (a SyntaxError). Also saves the matching tokenizer alongside the model.

    Returns the freshly initialized model.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
    # Expose the converter as a command-line tool via python-fire.
    # NOTE(review): `save_randomly_initialized_version` is not defined under
    # that name in this module (the function above is `UpperCAmelCase_`) — confirm.
    fire.Fire(save_randomly_initialized_version)
| 716
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_(board, row, column):
    """Return True if a queen can safely be placed at (row, column) on `board`.

    Checks the queen's row, column, and both upward diagonals (rows below
    `row` are still empty during the row-by-row search).

    Fix: the original signature reused `lowercase__` for all three parameters
    (a SyntaxError) and the body mixed undefined names.
    """
    # Same row.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # Same column.
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def UpperCAmelCase_(board, row):
    """Place queens row by row via backtracking, printing and recording each solution.

    Fix: the original signature duplicated the parameter name and the queen
    placement was a dead store (`a_ = 1`) instead of writing to the board.
    """
    if row >= len(board):
        # Every row is filled: record and display this solution.
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1  # place a queen, recurse, then backtrack
            solve(board, row + 1)
            board[row][i] = 0
    return False
def UpperCAmelCase_(board):
    """Print the board, 'Q' for a queen and '.' for an empty square.

    Fix: the body indexed an undefined name `board` instead of the parameter.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
# Fix: the board-size constant was bound to `lowercase` while the next line
# read the undefined name `n`; restored the intended names.
n = 8  # board dimension: place n queens on an n x n board
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
# Fix: all three constants were bound to the same name `lowercase` while the
# algorithm below reads N_POPULATION / N_SELECTED / MUTATION_PROBABILITY.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def UpperCAmelCase_(item, main_target):
    """Score `item` against `main_target`: one point per position with a matching gene.

    Fix: the original signature reused `lowercase__` for both parameters.

    Returns (item, score) with the score as a float.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def UpperCAmelCase_(parent_a, parent_b):
    """Single-point crossover: swap tails of the two parents at a random slice.

    Fix: the original duplicated the parameter name, so both children were
    built from the same parent and crossover never mixed genes.
    """
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def UpperCAmelCase_(child, genes):
    """With probability MUTATION_PROBABILITY, replace one random gene of `child`.

    Fix: the original assigned the mutated gene to a throwaway local instead
    of writing it into the child's gene list, so mutation had no effect.
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def UpperCAmelCase_(parent_a, population_score, genes):
    """Breed up to 10 mutated children from `parent_a`, proportional to its score.

    `parent_a` is an (item, normalized_score) pair; mates are drawn at random
    from the best `N_SELECTED` entries of `population_score`.

    Fix: the original signature reused `lowercase__` for all parameters.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new string to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def UpperCAmelCase_(target, genes, debug=True):
    """Evolve random strings until `target` is reproduced exactly.

    Fix: the original signature reused `lowercase__` for every parameter and
    collapsed the generation/population counters into one dead store.

    Returns (generation, total_population, best_string).
    """
    if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    # Fix: the target string and gene list were both bound to `lowercase`
    # while the call below read the undefined names `target_str`/`genes_list`,
    # and the 3-tuple result was not unpacked.
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_(masked_input, model, tokenizer, topk=5):
    """Return the top-k fills for the single ``<mask>`` token in `masked_input`.

    Fix: the original signature reused `lowercase__` for every parameter
    (a SyntaxError) and the body mixed undefined names.

    Returns a list of (filled_sentence, probability, predicted_token) tuples.
    """
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece marks word starts with U+2581; map it back to a space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
# Fix: tokenizer/model were both bound to `lowercase` while the following
# lines read the undefined names `model`, `tokenizer` and `masked_input`.
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()  # inference only — disables dropout
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase(metaclass=DummyObject):
    """Placeholder class used when the torch and scipy backends are missing.

    Any attempt to construct it raises an informative error via
    `requires_backends`.

    Fixes: the metaclass referenced the undefined name `UpperCamelCase_`
    (the import above provides `DummyObject`), the backend list was assigned
    to `__magic_name__` instead of the `_backends` attribute the dummy-object
    machinery reads, and both classmethods shared the name `lowercase_`.
    NOTE(review): the restored classmethod names follow the conventional
    dummy-object API (`from_config`/`from_pretrained`) — confirm.
    """

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: maps each submodule to the public names it provides.
# Fix: the structure and the optional model lists were all bound to the same
# name `lowercase`, so the torch/TF entries were never attached and the final
# `_LazyModule(...)` call referenced an undefined `_import_structure`.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files", [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ], )
def UpperCAmelCase_(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads README.md and/or dataset_infos.json.

    Fix: the original signature duplicated `lowercase__` and the body mixed
    in the undefined name `UpperCamelCase__`.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info", [
        DatasetInfo(),
        DatasetInfo(
            description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ),
    ], )
def UpperCAmelCase_(dataset_info, tmp_path):
    """Round-trip a DatasetInfo through write_to_directory/from_directory.

    Fix: the original signature duplicated `lowercase__` and the body used
    the undefined name `UpperCamelCase__`.
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def UpperCAmelCase_():
    """A fully populated DatasetInfo survives the YAML dump/load round trip.

    Fix: intermediate results were clobbered into `a_` and the body used the
    undefined name `UpperCamelCase__`; restored distinct locals.
    """
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_():
    """An empty DatasetInfo serializes to an empty YAML dict.

    Fix: both locals were clobbered into `a_`, leaving `dataset_info`
    undefined; restored distinct names.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict", [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, )
            }),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }),
    ], )
def UpperCAmelCase_(dataset_infos_dict, tmp_path):
    """Round-trip a DatasetInfosDict through a directory and compare.

    Fix: the original signature duplicated `lowercase__`, and the per-config
    updates were dead stores (`a_ = ...`) instead of writing back into the
    dict; restored the assignments.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers, T(n) = n * (n + 1) / 2.
# Fix: the solver below reads TRIANGULAR_NUMBERS, but the list was bound to
# the unrelated name `lowercase`.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_():
    """Project Euler 42: count the triangle words in words.txt (next to this file).

    A word's value is the sum of its letters' alphabet positions (A=1); a
    word is a triangle word when that value is a triangular number.

    Fix: this zero-argument function referenced the undefined `lowercase__`
    where `__file__` and the comprehension variable `x` belong.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this module
    # (the function above is obfuscated as `UpperCAmelCase_`) — confirm the
    # intended entry point.
    print(solution())
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase:
    """A 2-D matrix of ints/floats with standard linear-algebra operations.

    Fixes vs. the obfuscated source: the constructor's validation read the
    undefined names `error`/`rows`/`cols`, several distinct methods shared
    the name `lowercase_` (so only the last survived), results were assigned
    to throwaway locals, and the internal references to `Matrix` pointed at
    an undefined name (restored via the module-level alias below).
    """

    def __init__(self, rows):
        """Build a matrix from a list of equal-length rows of ints/floats."""
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float.")
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self):
        """Return the matrix's columns as a list of lists."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) shape of the matrix."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Return the determinant (0 for non-square matrices), via cofactor expansion."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))

    def is_invertable(self):
        """True when the determinant is non-zero."""
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])

    def cofactors(self):
        """Matrix of cofactors (minors with a checkerboard sign pattern)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        """Return the inverse; raises TypeError for singular matrices."""
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ])
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row, position=None):
        """Append `row` (or insert it at `position`) after validation."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append `column` (or insert it at `position`) after validation."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __mul__(self, other):
        # NOTE: scalar multiplication truncates each element with int(),
        # matching the original behavior (inverse() relies on it).
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        """Dot product of two equal-length sequences."""
        return sum(row[i] * column[i] for i in range(len(row)))


# Alias so the internal references (and callers) can use the natural name.
Matrix = UpperCAmelCase
if __name__ == "__main__":
    # Execute any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: logger, the rename map, the checkpoint registry, and the cache paths
# were all bound to the same name `lowercase`, while the functions below read
# `logger`, `new_layer_name_dict`, `REMOTE_MODEL_PATHS`, `default_cache_dir`
# and `CACHE_DIR`; restored the intended names.
logger = logging.get_logger(__name__)

set_seed(770)

# Maps suno/bark checkpoint parameter-name fragments to HF Bark names.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub location of each bark checkpoint, keyed by model type (+ "_small").
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def UpperCAmelCase_(model_type, use_small=False):
    """Return the local cache path for the given bark checkpoint.

    Fix: the original signature reused `lowercase__` for both parameters
    (a SyntaxError) and read undefined names.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def UpperCAmelCase_(from_hf_path, file_name):
    """Download `file_name` from the Hub repo `from_hf_path` into the local cache.

    Fix: the original signature duplicated the parameter name.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def UpperCAmelCase_(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno/bark checkpoint into the matching HF Bark sub-model.

    Fix: the original signature reused `lowercase__` for several parameters
    (a SyntaxError), the vocab-size/argument conversions were dead stores,
    and the remapped state-dict keys were never written back.

    Returns the loaded model in eval mode on `device`.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""")
    if len(missing_keys) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1E6 , 1)}M params, {round(val_loss, 3)} loss""")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def UpperCAmelCase_(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one bark sub-model, verify it against suno's implementation, and save it.

    Fix: the original signature reused `lowercase__` for several parameters
    (a SyntaxError) and the body mixed undefined/clobbered locals.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
    """Assemble the three converted Bark sub-models plus the Encodec codec into a
    single ``BarkModel`` and save it (optionally pushing to the hub).

    NOTE(review): all six parameters share one name (a SyntaxError as written)
    and the body reads unbound names such as ``semantic``, ``coarseAcoustic``,
    ``fineAcoustic``, ``codec`` and ``bark`` -- mechanically renamed code;
    confirm the intended argument list before running.
    """
    a_ = os.path.join(lowercase__ , lowercase__ )
    # sub-model configurations are read from each sub-model's dump folder
    a_ = BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
    a_ = BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
    a_ = BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
    a_ = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    a_ = BarkSemanticModel.from_pretrained(lowercase__ )
    a_ = BarkCoarseModel.from_pretrained(lowercase__ )
    a_ = BarkFineModel.from_pretrained(lowercase__ )
    a_ = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    # compose the four sub-configs / generation configs into top-level ones
    a_ = BarkConfig.from_sub_model_configs(
        lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    a_ = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    a_ = BarkModel(lowercase__ )
    a_ = semantic
    a_ = coarseAcoustic
    a_ = fineAcoustic
    a_ = codec
    a_ = bark_generation_config
    Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
    bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
    # CLI entry point for the Bark conversion script.
    lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    lowercase = parser.parse_args()
    # NOTE(review): both the parser and the parsed args are assigned to the name
    # ``lowercase`` while the lines above/below read ``parser`` and ``args`` --
    # mechanically renamed; the intended names were presumably ``parser``/``args``.
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
    """Return True iff the decimal representation of the argument is 1-9
    pandigital, i.e. uses each of the digits 1..9 exactly once.

    Bug fixed: the original converted the argument to a string but then ran
    ``len``/``set`` on the raw argument, so any integer input raised TypeError.
    String inputs behave exactly as before (``str(s) == s`` for str).
    """
    a_ = str(lowercase__ )
    return len(a_ ) == 9 and set(a_ ) == set("123456789" )
def UpperCAmelCase_ ( ):
    """Largest 1-9 pandigital number formed as the concatenated product of an
    integer with (1, 2) or (1, 2, 3) (Project Euler 38); None if none exists.

    Bug fixed: the original called ``is_9_pandigital``, a name never defined in
    this file (the helper above is named differently), so the search raised
    NameError at runtime. The check is now a private inline helper.
    """

    def _is_9_pandigital(num):
        # each of the digits 1..9 appears exactly once
        s = str(num)
        return len(s) == 9 and set(s) == set("123456789")

    # 4-digit base n concatenated with 2n (5 digits): n*10**5 + 2n == 100002*n
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    # 3-digit base n concatenated with 2n and 3n: n*10**6 + 2n*10**3 + 3n == 1002003*n
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file (both functions
    # above are named ``UpperCAmelCase_``), so this line raises NameError as
    # written; the intended call was presumably the search function above.
    print(F"""{solution() = }""")
| 41
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class UpperCAmelCase ( __a , __a):
    """Configuration for a BiT-style convolutional backbone (model_type "bit").

    NOTE(review): the two base classes are written as the undefined name
    ``__a`` -- presumably ``BackboneConfigMixin`` and ``PretrainedConfig`` from
    the imports above -- and the three class attributes below all share the
    name ``__magic_name__`` (only the last assignment survives). The code
    appears mechanically renamed; confirm before use.
    """

    # model_type identifier / allowed layer types / allowed padding strategies
    __magic_name__ : List[Any] = "bit"
    __magic_name__ : List[Any] = ["preactivation", "bottleneck"]
    __magic_name__ : Optional[int] = ["SAME", "VALID"]

    def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=6_4 , lowerCAmelCase_=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , lowerCAmelCase_=[3, 4, 6, 3] , lowerCAmelCase_="preactivation" , lowerCAmelCase_="relu" , lowerCAmelCase_=None , lowerCAmelCase_=3_2 , lowerCAmelCase_=0.0 , lowerCAmelCase_=False , lowerCAmelCase_=3_2 , lowerCAmelCase_=1 , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> List[str]:
        """Validate layer type / padding strategy and store configuration values.

        NOTE(review): every parameter shares one name (a SyntaxError as
        written) while the body reads ``layer_type``, ``global_padding``,
        ``num_channels`` etc. -- mechanically renamed arguments.
        """
        super().__init__(**lowerCAmelCase_)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        if global_padding is not None:
            # normalise to upper case; reject unsupported padding strategies
            if global_padding.upper() in self.supported_padding:
                a_ = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""")
        a_ = num_channels
        a_ = embedding_size
        a_ = hidden_sizes
        a_ = depths
        a_ = layer_type
        a_ = hidden_act
        a_ = global_padding
        a_ = num_groups
        a_ = drop_path_rate
        a_ = embedding_dynamic_padding
        a_ = output_stride
        a_ = width_factor
        # stage names: "stem" followed by "stage1".."stageN" (one per depth entry)
        a_ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCAmelCase_) + 1)]
        a_ , a_ = get_aligned_output_features_output_indices(
            out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names)
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
    """Shared test mixin for diffusers UNet sub-blocks.

    Subclasses are expected to provide ``block_class``, ``block_type``
    ("down", "mid" or "up") and ``get_dummy_input``.

    NOTE(review): every method below is named ``lowercase_`` (so only the last
    definition of that name survives) and several bodies read names the
    garbled signatures no longer bind -- mechanically renamed code; the
    docstrings describe the evident intent of each method.
    """

    @property
    def lowercase_ ( self) -> Any:
        """Dummy input for the block under test (delegates to get_dummy_input)."""
        return self.get_dummy_input()

    @property
    def lowercase_ ( self) -> List[str]:
        """Expected output shape for the configured block type."""
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
        """Build the dict of random tensors the block's forward pass expects.

        NOTE(review): the four flag parameters share one name (a SyntaxError
        as written); the body reads ``include_temb`` etc.
        """
        a_ = 4
        a_ = 3_2
        a_ = (3_2, 3_2)
        # fixed seed keeps the dummy tensors deterministic across runs
        a_ = torch.manual_seed(0)
        a_ = torch.device(lowerCAmelCase_)
        a_ = (batch_size, num_channels) + sizes
        a_ = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        a_ = {"hidden_states": hidden_states}
        if include_temb:
            a_ = 1_2_8
            a_ = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        if include_res_hidden_states_tuple:
            # separate generator so the residual tuple differs from hidden_states
            a_ = torch.manual_seed(1)
            a_ = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
        if include_encoder_hidden_states:
            a_ = floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
        if include_skip_sample:
            a_ = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        return dummy_input

    def lowercase_ ( self) -> Optional[int]:
        """Default init kwargs + dummy inputs for the block under test."""
        a_ = {
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        # up blocks add prev_output_channel; mid blocks take no out_channels
        if self.block_type == "up":
            a_ = 3_2
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        a_ = self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self , lowerCAmelCase_) -> Dict:
        """Run the block in eval mode and compare an output slice to expected values."""
        a_ , a_ = self.prepare_init_args_and_inputs_for_common()
        a_ = self.block_class(**lowerCAmelCase_)
        unet_block.to(lowerCAmelCase_)
        unet_block.eval()
        with torch.no_grad():
            a_ = unet_block(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ = output[0]
        self.assertEqual(output.shape , self.output_shape)
        # only the last 3x3 corner of the final channel is compared
        a_ = output[0, -1, -3:, -3:]
        a_ = torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
        assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def lowercase_ ( self) -> Optional[Any]:
        """Smoke-test a training step: forward, MSE loss vs random target, backward."""
        a_ , a_ = self.prepare_init_args_and_inputs_for_common()
        a_ = self.block_class(**lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.train()
        a_ = model(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ = output[0]
        a_ = torch.device(lowerCAmelCase_)
        a_ = randn_tensor(output.shape , device=lowerCAmelCase_)
        a_ = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
        loss.backward()
| 41
| 0
|
def UpperCAmelCase_ ( ):
    """Return the last ten digits of sum_{i=1}^{1000} i**i as a string
    (Project Euler 48).

    Bugs fixed: the original initialised one accumulator name (``a_``) but
    added into an undefined name (``total``) and returned ``str`` of another
    undefined name (``lowercase__``) -- a NameError at runtime. The rewrite
    uses a single accumulator, and three-argument ``pow`` keeps every term
    reduced modulo 10**10 so no 3000-digit integers are ever built.
    """
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1_001)) % modulus
    # zero-pad so the result is always exactly ten characters, matching a
    # plain str(full_sum)[-10:] of the unreduced sum
    return str(total).zfill(10)
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file (the function
    # above is named ``UpperCAmelCase_``) -- this call raises NameError as
    # written; the intended call was presumably the function above.
    print(solution())
| 701
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowercase__ ):
print(F"""{i}\t\t{d}""" )
def UpperCAmelCase_ ( graph , distance , edge_count ):
    """Return True if any edge can still be relaxed under ``distance`` -- after
    V-1 Bellman-Ford passes this indicates a reachable negative-weight cycle.

    Bug fixed: the original declared one parameter name three times (a
    SyntaxError as written) and unpacked every edge field into the single name
    ``a_`` while the condition read ``u``/``v``/``w`` -- all undefined. The
    parameters and unpacking targets now carry the names the body uses.

    Args:
        graph: list of edge dicts with keys "src", "dst" and "weight".
        distance: current tentative distances, indexed by vertex.
        edge_count: number of edges in ``graph``.
    """
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            return True
    return False
def UpperCAmelCase_ ( graph , vertex_count , edge_count , src ):
    """Single-source shortest paths via the Bellman-Ford algorithm.

    Bugs fixed: the original declared one parameter name four times (a
    SyntaxError as written), assigned every intermediate value to ``a_`` while
    reading ``distance``/``u``/``v``/``w``, and called ``check_negative_cycle``
    -- a name never defined in this file. The cycle check is now a private
    inline helper so the function stands on its own.

    Args:
        graph: list of edge dicts with keys "src", "dst" and "weight".
        vertex_count: number of vertices (labelled 0 .. vertex_count-1).
        edge_count: number of edges in ``graph``.
        src: source vertex index.

    Returns:
        List of shortest distances from ``src`` (float("inf") if unreachable).

    Raises:
        Exception: if a negative-weight cycle is reachable from ``src``.
    """

    def _has_relaxable_edge(distance):
        # A successful relaxation after V-1 full passes proves a negative cycle.
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                return True
        return False

    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    # V-1 rounds of relaxing every edge reaches all shortest paths.
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    if _has_relaxable_edge(distance):
        raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive driver: read a graph edge list from stdin, run Bellman-Ford
    # from a user-chosen source, and print the resulting distances.
    # NOTE(review): every value below is assigned to the single name
    # ``lowercase`` while later lines read ``V``, ``E``, ``graph``, ``src``,
    # ``dest``, ``weight`` and ``source``, and call ``bellman_ford`` /
    # ``print_distance`` (names not defined in this file) -- the driver is
    # mechanically renamed and raises NameError as written.
    lowercase = int(input('''Enter number of vertices: ''').strip())
    lowercase = int(input('''Enter number of edges: ''').strip())
    lowercase = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        lowercase , lowercase , lowercase = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
    lowercase = int(input('''\nEnter shortest path source:''').strip())
    lowercase = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
from collections.abc import Callable
class UpperCAmelCase :
    """Generic keyed heap backed by a list plus a position map.

    ``arr`` holds ``[item, score]`` pairs, ``pos_map`` maps item -> index to
    support updates and deletion, and ``key`` scores items for ordering.

    NOTE(review): this class appears mechanically renamed: every method is
    called ``lowercase_`` (so only the last definition of that name survives),
    the two-argument helpers declare the same parameter name twice (a
    SyntaxError as written), many bodies read index names (``i``, ``j``,
    ``index``, ``item`` ...) the garbled signatures no longer bind, and several
    calls pass the same variable for both arguments (e.g. swapping an index
    with itself). The docstrings below describe the evident *intended* role of
    each method; the code itself must be restored before use.
    """

    def __init__( self , lowerCAmelCase_ = None) -> None:
        """Create an empty heap; the optional argument scores items for ordering."""
        a_ = []
        # Stores indexes of each item for supporting updates and deletion.
        a_ = {}
        # Stores current size of heap.
        a_ = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        # NOTE(review): the lambda's parameter was renamed but its body still
        # returns the unbound name ``x``; calling the default key raises
        # NameError -- the intent was clearly the identity function.
        a_ = key or (lambda lowerCAmelCase_: x)

    def lowercase_ ( self , lowerCAmelCase_) -> int | None:
        """Index of the parent of a node (None for the root)."""
        return int((i - 1) / 2) if i > 0 else None

    def lowercase_ ( self , lowerCAmelCase_) -> int | None:
        """Index of the left child of a node, if it lies inside the heap."""
        a_ = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def lowercase_ ( self , lowerCAmelCase_) -> int | None:
        """Index of the right child of a node, if it lies inside the heap."""
        a_ = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> None:
        """Swap two nodes, keeping ``pos_map`` consistent with ``arr``."""
        a_ , a_ = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        a_ , a_ = self.arr[j], self.arr[i]

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> bool:
        """True if the score at the first index is strictly below the second's."""
        return self.arr[i][1] < self.arr[j][1]

    def lowercase_ ( self , lowerCAmelCase_) -> int:
        """Of a node and its children, the index that should sit on top."""
        a_ = self._left(lowerCAmelCase_)
        a_ = self._right(lowerCAmelCase_)
        a_ = i
        if left is not None and not self._cmp(lowerCAmelCase_ , lowerCAmelCase_):
            a_ = left
        if right is not None and not self._cmp(lowerCAmelCase_ , lowerCAmelCase_):
            a_ = right
        return valid_parent

    def lowercase_ ( self , lowerCAmelCase_) -> None:
        """Bubble the node at the given index up while it beats its parent."""
        a_ = self._parent(lowerCAmelCase_)
        while parent is not None and not self._cmp(lowerCAmelCase_ , lowerCAmelCase_):
            self._swap(lowerCAmelCase_ , lowerCAmelCase_)
            a_ , a_ = parent, self._parent(lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_) -> None:
        """Push the node at the given index down until the heap property holds."""
        a_ = self._get_valid_parent(lowerCAmelCase_)
        while valid_parent != index:
            self._swap(lowerCAmelCase_ , lowerCAmelCase_)
            a_ , a_ = valid_parent, self._get_valid_parent(lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> None:
        """Re-score an existing item and restore the heap property around it."""
        if item not in self.pos_map:
            return
        a_ = self.pos_map[item]
        a_ = [item, self.key(lowerCAmelCase_)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(lowerCAmelCase_)
        self._heapify_down(lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_) -> None:
        """Remove an item: move the last node into its slot and re-heapify."""
        if item not in self.pos_map:
            return
        a_ = self.pos_map[item]
        del self.pos_map[item]
        a_ = self.arr[self.size - 1]
        a_ = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(lowerCAmelCase_)
            self._heapify_down(lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> None:
        """Insert a new scored item, reusing a free tail slot when available."""
        a_ = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(lowerCAmelCase_)])
        else:
            a_ = [item, self.key(lowerCAmelCase_)]
        a_ = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def lowercase_ ( self) -> tuple | None:
        """Top ``[item, score]`` pair without removing it (None when empty)."""
        return self.arr[0] if self.size else None

    def lowercase_ ( self) -> tuple | None:
        """Pop and return the top ``[item, score]`` pair (None when empty)."""
        a_ = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def UpperCAmelCase_ ( ):
    """Placeholder with an empty body (docstring only); returns None."""
if __name__ == "__main__":
    # run any doctests defined in this module
    import doctest
    doctest.testmod()
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline

# Minimal DreamBooth inference script: load a fine-tuned Stable Diffusion
# pipeline on CUDA, generate one image for the instance prompt, and save it.
# NOTE(review): every value is assigned to the single name ``lowercase`` while
# later lines read ``model_id``, ``pipe``, ``prompt`` and ``image`` -- the
# script is mechanically renamed and raises NameError as written. Also
# ``torch.floataa`` is not a torch attribute (presumably ``torch.float16``).
lowercase = '''path-to-your-trained-model'''
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase :
    """Builds SwiftFormer configs, dummy pixel inputs and shape checks for the
    model tests below.

    NOTE(review): the helper methods are all named ``lowercase_`` and the
    ``__init__`` signature repeats one parameter name (a SyntaxError as
    written) -- mechanically renamed code; docstrings describe the intent.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1_3 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=2_2_4 , lowerCAmelCase_=1_0_0_0 , lowerCAmelCase_=[3, 3, 6, 4] , lowerCAmelCase_=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Any:
        """Store the tester's hyper-parameters (batch size, image size, ...)."""
        a_ = parent
        a_ = batch_size
        a_ = num_channels
        a_ = is_training
        a_ = use_labels
        a_ = hidden_dropout_prob
        a_ = attention_probs_dropout_prob
        a_ = num_labels
        a_ = image_size
        a_ = layer_depths
        a_ = embed_dims

    def lowercase_ ( self) -> Optional[int]:
        """Random pixel values (plus labels when use_labels) and a config."""
        a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a_ = None
        if self.use_labels:
            a_ = ids_tensor([self.batch_size] , self.num_labels)
        a_ = self.get_config()
        return config, pixel_values, labels

    def lowercase_ ( self) -> Dict:
        """A small deterministic SwiftFormerConfig built from the stored params."""
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase_ , layer_scale_init_value=1e-5 , )

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
        """Run the bare model in eval mode and check the final feature-map shape."""
        a_ = SwiftFormerModel(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ = model(lowerCAmelCase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
        """Run the classification head with and without labels; check logits shape."""
        a_ = self.num_labels
        a_ = SwiftFormerForImageClassification(lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ = model(lowerCAmelCase_ , labels=lowerCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
        # second pass: fresh model, no labels supplied
        a_ = SwiftFormerForImageClassification(lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a_ = model(lowerCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def lowercase_ ( self) -> str:
        """Config plus an inputs dict, as expected by the common test mixin."""
        ((a_) , (a_) , (a_)) = self.prepare_config_and_inputs()
        a_ = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __a , __a , unittest.TestCase):
    """Model tests for SwiftFormer via the common tester mixins.

    NOTE(review): the mixin base classes are written as the undefined name
    ``__a`` (presumably ModelTesterMixin and PipelineTesterMixin from the
    imports above), the class-level flags all share the name
    ``__magic_name__`` (only the last assignment survives) and every test
    method is named ``lowercase_`` -- mechanically renamed code; docstrings
    describe the evident intent.
    """

    # model classes / pipeline mapping / common-test feature flags
    __magic_name__ : Any = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    __magic_name__ : List[Any] = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    __magic_name__ : Union[str, Any] = False
    __magic_name__ : List[Any] = False
    __magic_name__ : int = False
    __magic_name__ : List[str] = False
    __magic_name__ : List[Any] = False

    def lowercase_ ( self) -> int:
        """Set up the model tester and the config tester."""
        a_ = SwiftFormerModelTester(self)
        a_ = ConfigTester(
            self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )

    def lowercase_ ( self) -> List[str]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def lowercase_ ( self) -> Optional[Any]:
        """Skipped: the architecture has no inputs_embeds path."""
        pass

    def lowercase_ ( self) -> Dict:
        """Output embeddings are either absent or a plain nn.Linear."""
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ = model_class(lowerCAmelCase_)
            a_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear))

    def lowercase_ ( self) -> Tuple:
        """forward() must take pixel_values as its first argument."""
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ = model_class(lowerCAmelCase_)
            a_ = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a_ = [*signature.parameters.keys()]
            a_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase_)

    def lowercase_ ( self) -> Any:
        """Shape check for the bare model."""
        a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase_)

    def lowercase_ ( self) -> Optional[Any]:
        """Shape check for the image-classification head."""
        a_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)

    @slow
    def lowercase_ ( self) -> List[str]:
        """Pretrained checkpoints load without error."""
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a_ = SwiftFormerModel.from_pretrained(lowerCAmelCase_)
            self.assertIsNotNone(lowerCAmelCase_)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def lowercase_ ( self) -> Any:
        """Skipped: the architecture exposes no attention maps."""
        pass

    def lowercase_ ( self) -> str:
        """Hidden-state outputs have the expected count and per-stage shapes."""

        def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
            a_ = model_class(lowerCAmelCase_)
            model.to(lowerCAmelCase_)
            model.eval()
            with torch.no_grad():
                a_ = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_))
            a_ = outputs.hidden_states
            a_ = 8
            self.assertEqual(len(lowerCAmelCase_) , lowerCAmelCase_)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(lowerCAmelCase_)):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]) , )

        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ = True
            check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a_ = True
            check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)

    def lowercase_ ( self) -> str:
        """With zeroed init ranges, every parameter initialises to 0.0 or 1.0."""

        def _config_zero_init(lowerCAmelCase_):
            # recursively shrink every *_range/*_std/initializer/layer_scale field
            a_ = copy.deepcopy(lowerCAmelCase_)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(lowerCAmelCase_ , lowerCAmelCase_ , 1e-10)
                if isinstance(getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , lowerCAmelCase_):
                    a_ = _config_zero_init(getattr(lowerCAmelCase_ , lowerCAmelCase_))
                    setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
            return configs_no_init

        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
        a_ = _config_zero_init(lowerCAmelCase_)
        for model_class in self.all_model_classes:
            a_ = model_class(config=lowerCAmelCase_)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def lowercase_ ( self) -> Optional[int]:
        """Skipped pending a smaller common-test model."""
        pass
def UpperCAmelCase_ ( ):
    """Load the COCO fixture image used by the integration test below.

    Bug fixed: the original assigned the opened image to one name (``a_``) but
    returned another, undefined name (``image``) -- a NameError at runtime.
    The image is now returned directly.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
    """Slow integration test: run the pretrained swiftformer-xs checkpoint on
    the COCO fixture image and compare logits against recorded values."""

    @cached_property
    def lowercase_ ( self) -> Union[str, Any]:
        """The checkpoint's image processor (None when vision deps are absent)."""
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def lowercase_ ( self) -> str:
        """End-to-end forward pass with logits shape and value checks."""
        a_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(lowerCAmelCase_)
        a_ = self.default_image_processor
        a_ = prepare_img()
        a_ = image_processor(images=lowerCAmelCase_ , return_tensors="pt").to(lowerCAmelCase_)
        # forward pass
        with torch.no_grad():
            a_ = model(**lowerCAmelCase_)
        # verify the logits
        a_ = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
        a_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(lowerCAmelCase_)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4))
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def UpperCAmelCase_ ( lowercase__ ):
    """Return the argument unchanged if it is already iterable, otherwise the
    pair ``(value, value)`` -- used to normalise scalar image/patch sizes.

    Bug fixed: the original returned the undefined name ``x`` on both paths
    (a NameError at runtime); it now returns the actual argument.
    """
    if isinstance(lowercase__ , collections.abc.Iterable ):
        return lowercase__
    return (lowercase__, lowercase__)
@require_tf
class UpperCAmelCase :
    """Shared checks for TFVisionTextDualEncoderModel built from various
    vision/text backbone pairs; concrete subclasses supply models and inputs.

    NOTE(review): the methods are all named ``lowercase_`` and several
    signatures repeat a parameter name (a SyntaxError as written) while the
    bodies read names such as ``input_ids`` and ``pixel_values`` -- the file
    appears mechanically renamed; docstrings describe the evident intent.
    """

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
        """Hook: return (vision_model, text_model) for the given configs."""
        pass

    def lowercase_ ( self) -> int:
        """Hook: return configs and dummy inputs for the pair under test."""
        pass

    def lowercase_ ( self) -> Optional[Any]:
        """Hook: return a pretrained dual-encoder model plus matching inputs."""
        pass

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_) -> Dict:
        """Build the model from sub-configs and check embedding output shapes."""
        a_ = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_)
        a_ = TFVisionTextDualEncoderModel(lowerCAmelCase_)
        a_ = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_) -> Any:
        """Build the model from instantiated backbones and check output shapes."""
        a_ , a_ = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_)
        a_ = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase_ , text_model=lowerCAmelCase_)
        a_ = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_) -> Dict:
        """Build the model via from_vision_text_pretrained and check shapes."""
        a_ , a_ = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_)
        a_ = {"vision_model": vision_model, "text_model": text_model}
        a_ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_)
        a_ = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_) -> Dict:
        """Outputs must survive a save_pretrained / from_pretrained round trip."""
        a_ , a_ = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_)
        a_ = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase_ , text_model=lowerCAmelCase_)
        a_ = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
        a_ = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCAmelCase_)
            a_ = TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_)
            a_ = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
            a_ = after_output[0].numpy()
            a_ = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(lowerCAmelCase_ , 1e-5)

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_) -> Dict:
        """Attention outputs must have the expected per-layer shapes."""
        a_ , a_ = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_)
        a_ = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase_ , text_model=lowerCAmelCase_)
        a_ = model(
            input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_attentions=lowerCAmelCase_)
        a_ = output.vision_model_output.attentions
        self.assertEqual(len(lowerCAmelCase_) , vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        a_ = to_atuple(vision_model.config.image_size)
        a_ = to_atuple(vision_model.config.patch_size)
        a_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        a_ = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        a_ = output.text_model_output.attentions
        self.assertEqual(len(lowerCAmelCase_) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Dict:
        """Assert the max absolute difference between two arrays is within tol."""
        a_ = np.abs((a - b)).max()
        self.assertLessEqual(lowerCAmelCase_ , lowerCAmelCase_ , f"""Difference between torch and flax is {diff} (>= {tol}).""")

    def lowercase_ ( self) -> int:
        """Run the sub-config construction check."""
        a_ = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**lowerCAmelCase_)

    def lowercase_ ( self) -> Any:
        """Run the from-pretrained-configs check."""
        a_ = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCAmelCase_)

    def lowercase_ ( self) -> Optional[int]:
        """Run the from_vision_text_pretrained check."""
        a_ = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase_)

    def lowercase_ ( self) -> Union[str, Any]:
        """Run the save/load round-trip check."""
        a_ = self.prepare_config_and_inputs()
        self.check_save_load(**lowerCAmelCase_)

    def lowercase_ ( self) -> Dict:
        """Run the attention-shape check."""
        a_ = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCAmelCase_)

    @slow
    def lowercase_ ( self) -> Any:
        """Pretrained model outputs must survive a save/load round trip."""
        a_ , a_ = self.get_pretrained_model_and_inputs()
        a_ = model_a(**lowerCAmelCase_)
        a_ = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCAmelCase_)
            a_ = TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_)
            a_ = model_a(**lowerCAmelCase_)
            a_ = after_outputs[0].numpy()
            a_ = np.amax(np.abs(out_a - out_a))
            self.assertLessEqual(lowerCAmelCase_ , 1e-5)
@require_tf
class UpperCAmelCase ( __a , unittest.TestCase):
    """ViT + BERT dual-encoder test case (TF).

    NOTE(review): the three method names below were all mangled to ``lowercase_`` by an
    automated rewrite; they correspond to get_pretrained_model_and_inputs,
    get_vision_text_model and prepare_config_and_inputs of the mixin — confirm before renaming.
    """

    def lowercase_(self) -> List[Any]:
        # Build a tiny pretrained ViT/BERT dual encoder plus a matching random batch.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def lowercase_(self, vision_config, text_config) -> List[Any]:
        # Instantiate the two towers from their configs.
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def lowercase_(self) -> Optional[int]:
        # Combine the per-model testers' configs/inputs into the dict the mixin expects.
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        # ViT tester returns (config, pixel_values, labels); labels are unused here.
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class UpperCAmelCase ( __a , unittest.TestCase):
    """DeiT + RoBERTa dual-encoder test case (TF).

    NOTE(review): method names were mangled to ``lowercase_`` by an automated rewrite.
    """

    def lowercase_(self) -> Tuple:
        # Build a tiny pretrained DeiT/RoBERTa dual encoder plus a matching random batch.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def lowercase_(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs) -> int:
        # DeiT-specific override: its sequence has an extra distillation token.
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def lowercase_(self, vision_config, text_config) -> Tuple:
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def lowercase_(self) -> Any:
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        # DeiT tester returns (config, pixel_values, labels); labels are unused here.
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class UpperCAmelCase ( __a , unittest.TestCase):
    """CLIP-vision + BERT dual-encoder test case (TF).

    NOTE(review): method names were mangled to ``lowercase_`` by an automated rewrite.
    """

    def lowercase_(self) -> Union[str, Any]:
        # Build a tiny pretrained CLIP-vision/BERT dual encoder plus a matching random batch.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def lowercase_(self, vision_config, text_config) -> int:
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def lowercase_(self) -> int:
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        # CLIP vision tester returns (config, pixel_values).
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class UpperCAmelCase ( unittest.TestCase):
    """Slow integration test against the real clip-italian checkpoint."""

    @slow
    def lowercase_(self) -> Optional[Any]:
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )

        # Reference values recorded from the PyTorch checkpoint.
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( vocab_file , emoji_file ):
    """Load the vocabulary and emoji tables for the GPT-NeoX-Japanese tokenizer.

    A vocab line is either a single token or several comma-separated spellings of the
    same token; a line that is exactly "," is the literal comma token.

    Returns:
        (vocab, raw_vocab, ids_to_tokens, emoji):
        vocab maps every spelling -> id, raw_vocab maps the raw line -> id,
        ids_to_tokens maps id -> list of spellings, emoji is the parsed JSON table.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
    """GPT-NeoX-Japanese tokenizer backed by a vocab file and an emoji table.

    NOTE(review): the four class attributes below were all mangled to ``__magic_name__``
    (they shadow each other) and the method names to ``lowercase_`` by an automated
    rewrite; they are kept as-is to preserve the declared interface — confirm the
    intended names (vocab_files_names, vocab_size, get_vocab, _tokenize, ...) before renaming.
    """
    __magic_name__ : Optional[int] = VOCAB_FILES_NAMES
    __magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : str = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>",
                 bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs) -> None:
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
            do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def lowercase_(self) -> int:
        # Number of distinct token lines (alternative spellings count once).
        return len(self.raw_vocab)

    def lowercase_(self) -> Optional[Any]:
        # Full vocabulary including tokens added after loading.
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def lowercase_(self, lowerCAmelCase_) -> List[str]:
        # Delegate tokenization to the sub-word tokenizer (optionally cleaning first).
        return self.subword_tokenizer.tokenize(lowerCAmelCase_, clean=self.do_clean_text)

    def lowercase_(self, lowerCAmelCase_) -> Optional[int]:
        # Token -> id, falling back to the unknown token's id.
        return self.vocab.get(lowerCAmelCase_, self.vocab.get(self.unk_token))

    def lowercase_(self, lowerCAmelCase_) -> List[str]:
        # Id -> surface string via the sub-word tokenizer.
        return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)

    def lowercase_(self, lowerCAmelCase_) -> Optional[Any]:
        # Join token pieces back into a single string.
        out_string = "".join(lowerCAmelCase_).strip()
        return out_string

    def lowercase_(self, lowerCAmelCase_) -> List[int]:
        # Flatten a Conversation into ids, keeping at most model_max_length of the tail.
        input_ids = []
        for is_user, text in lowerCAmelCase_.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def lowercase_(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Write the vocab and emoji files into `save_directory` and return their paths."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class UpperCAmelCase ( __a):
    """Greedy longest-match Japanese sub-word tokenizer used by the GPT-NeoX-Japanese
    tokenizer above (handles URLs/emails/dates/prices masking, emoji, and byte fallback)."""

    def __init__(self, vocab, ids_to_tokens, emoji) -> None:
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # Longest vocabulary entry; bounds the greedy match window in tokenize().
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        # Patterns replaced by a single special token in clean_text().
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        # Box-drawing and block-element characters all collapse to <BLOCK>.
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self) -> Tuple:
        return len(self.ids_to_tokens)

    def clean_text(self, content) -> Any:
        """Mask URLs, emails, phone numbers, dates, prices and box characters."""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False) -> Union[str, Any]:
        """Tokenize `text` by greedy longest match against the vocab, with byte fallback."""
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for 1-char strings whose UTF-8 is 2 bytes in selected symbol ranges.
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checkuae(x):
            # True for 1-char strings whose UTF-8 is 3 bytes in the U+2000..U+2BFF range.
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Special tokens start with "<" and may be up to maxlen long; plain text
            # is matched over at most 3 characters.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    # Byte-level fallback for anything out of vocabulary.
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n") -> List[Any]:
        """Map a single id back to its surface string (decoding byte/emoji specials)."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 41
| 0
|
from typing import Any
import numpy as np
def UpperCAmelCase_ ( lowercase__ ):
    """Return True iff the matrix equals its conjugate transpose (i.e. is Hermitian)."""
    return np.array_equal(lowercase__ , lowercase__.conjugate().T )
def UpperCAmelCase_ ( lowercase__ , v ):
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix ``lowercase__`` and column vector ``v``."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(lowercase__)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def UpperCAmelCase_ ( ):
    """Smoke-test the Hermitian check and the Rayleigh quotient on two matrices.

    NOTE(review): ``is_hermitian`` / ``rayleigh_quotient`` are referenced by their
    original names; the definitions above were renamed by an automated rewrite —
    confirm the intended bindings.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
    """Build an EfficientNetConfig for the given variant name (e.g. "b0")."""
    model_name = lowercase__
    config = EfficientNetConfig()
    # NOTE(review): attribute names restored from the upstream conversion script — confirm.
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    # ImageNet-1k label mapping fetched from the hub.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def UpperCAmelCase_ ( ):
    """Download the standard COCO validation image (two cats) used for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def UpperCAmelCase_ ( lowercase__ ):
    """Build the EfficientNetImageProcessor matching the given variant's input size."""
    model_name = lowercase__
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
    """Build a mapping from original TF EfficientNet parameter names to HF state-dict keys.

    Only keys actually present in ``lowercase__`` (the original TF parameter names)
    are included, plus the classifier head keys.
    """
    original_param_names = lowercase__
    # Collect and number the MBConv block identifiers ("1a", "1b", ...).
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight"""))
        rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight"""))
        rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias"""))
        rename_keys.append(
            (f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"""))
        rename_keys.append(
            (f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var"""))
        rename_keys.append(
            (f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"""))
        rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"""))
        rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"""))
        rename_keys.append(
            (f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"""))
        rename_keys.append(
            (f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"""))
        rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"""))
        rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"""))
        rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight"""))
        rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias"""))
        rename_keys.append(
            (f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight"""))
        rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight"""))
        rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias"""))
        rename_keys.append(
            (f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean"""))
        rename_keys.append(
            (f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var"""))

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # Classifier head lives outside the "efficientnet." backbone prefix.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def UpperCAmelCase_ ( hf_params , tf_params , key_mapping ):
    """Copy TF parameter values into the HF state dict in place.

    Conv kernels are permuted from TF's HWIO layout to PyTorch's OIHW
    (depthwise kernels use their own layout); dense kernels are transposed.
    """
    for key, value in tf_params.items():
        if "normalization" in key:
            # Normalization layers have no HF counterpart in the mapping.
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__a , lowercase__b , lowercase__c ):
    """Convert a Keras EfficientNet checkpoint to HF format, verify the logits match,
    and optionally save locally and/or push to the hub.

    Args mirror the CLI: (model_name, pytorch_dump_folder_path, save_model, push_to_hub).
    """
    model_name, pytorch_dump_folder_path, save_model, push_to_hub = lowercase__, lowercase__a, lowercase__b, lowercase__c

    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False  # NOTE(review): restored flag name — confirm against upstream script
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"""Pushing converted {model_name} to the hub...""")
        repo_name = f"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''b0''',
        type=str,
        help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''hf_model''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
    """Return the prime factorization of ``lowercase__`` as a non-decreasing list
    (empty for values < 2), by trial division up to sqrt(n)."""
    n = lowercase__
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # Whatever remains is a prime factor larger than sqrt of the original value.
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
    """Project Euler 25: index of the first Fibonacci term with ``lowercase__`` digits."""
    answer = 1
    # Fibonacci walk inlined here: the module's generator helper lost its name during
    # an automated rewrite, so relying on it would hit a shadowed/undefined binding.
    a, b = 1, 1
    while len(str(b)) < lowercase__:
        a, b = b, a + b
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure: submodule name -> exported names. Extended with the
# torch-backed modeling module below when torch is available.
_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_timesformer'''] = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import argparse
import os
import re
lowercase = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase = re.compile(R'''\[([^\]]+)\]''')
def UpperCAmelCase_ ( lowercase__ ) -> str:
    """Return the leading whitespace of the line ``lowercase__`` ("" when there is none)."""
    search = _re_indent.search(lowercase__)
    return "" if search is None else search.groups()[0]
def UpperCAmelCase_ ( code , indent_level="" , start_prompt=None , end_prompt=None ) -> str:
    """Split ``code`` into blocks of lines sharing ``indent_level``.

    If ``start_prompt`` is given, everything before the first line starting with it
    becomes the first block; if ``end_prompt`` is given, splitting stops at the first
    line starting with it and the remainder becomes the final block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def UpperCAmelCase_ ( lowercase__ ):
    """Wrap key function *lowercase__* so comparisons ignore case and underscores.

    Fix: the inner function called the undefined name ``key`` instead of the
    outer parameter, so the returned function always raised ``NameError``.
    """
    def _inner(obj):
        return lowercase__(obj).lower().replace("_", "")
    return _inner
def UpperCAmelCase_ ( lowercase__ , key=None ):
    """Sort a list of import names: ALL_CAPS constants first, Capitalized
    classes second, lowercase functions last — each group alphabetically,
    ignoring case and underscores.

    Fixes: the original declared two parameters both named ``lowercase__``
    (a SyntaxError), ``noop`` returned the undefined ``x``, and the comparison
    key helper was referenced by an undefined name; the underscore-insensitive
    key is now inlined.  ``key=`` keyword calls elsewhere in the file still work.
    """
    def noop(obj):
        return obj

    if key is None:
        key = noop

    def _cmp_key(obj):
        # Case- and underscore-insensitive ordering within each group.
        return key(obj).lower().replace("_", "")

    # Constants are all uppercase, they go first.
    constants = [obj for obj in lowercase__ if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in lowercase__ if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in lowercase__ if not key(obj)[0].isupper()]
    return sorted(constants, key=_cmp_key) + sorted(classes, key=_cmp_key) + sorted(functions, key=_cmp_key)
def UpperCAmelCase_ ( lowercase__ ) -> Union[str, Any]:
    """Sort the object names inside one ``_import_structure`` import statement.

    Handles three layouts: one name per line, all names on one bracketed line,
    and the whole statement on a single line.
    """
    # NOTE(review): the local bindings in this function were destroyed by the
    # renaming pass — results are assigned to the throwaway ``a_`` while later
    # lines read the intended names (``match``, ``imports``, ``keys``,
    # ``import_statement``, ``lines``, ``idx``, ``sorted_indices``), which are
    # undefined here.  Restore the bindings from the upstream
    # ``utils/custom_init_isort.py`` before running this code.
    def _replace(lowercase__ ):
        # Rewrite a single ``[ "a", "b", ... ]`` group with its contents sorted.
        a_ =match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        a_ =[part.strip().replace("\"" , "" ) for part in imports.split("," )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            a_ =keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowercase__ )] ) + "]"

    a_ =import_statement.split("\n" )
    if len(lowercase__ ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        a_ =2 if lines[1].strip() == "[" else 1
        a_ =[(i, _re_strip_line.search(lowercase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        a_ =sort_objects(lowercase__ , key=lambda lowercase__ : x[1] )
        a_ =[lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lowercase__ ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            a_ =_re_bracket_content.sub(_replace , lines[1] )
        else:
            a_ =[part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                a_ =keys[:-1]
            a_ =get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowercase__ )] )
        return "\n".join(lowercase__ )
    else:
        # Finally we have to deal with imports fitting on one line
        a_ =_re_bracket_content.sub(_replace , lowercase__ )
        return import_statement
def UpperCAmelCase_ ( lowercase__ , check_only=True ):
    """Sort the ``_import_structure`` entries of the ``__init__.py`` at *lowercase__*.

    Fix: the original signature declared two parameters named ``lowercase__``
    (a SyntaxError) and all locals were bound to the throwaway ``a_`` while
    later lines read the intended names (``code``, ``main_blocks``, ...);
    bindings restored from the upstream sorter this file was derived from.

    Returns True when *check_only* and the file would change; otherwise
    rewrites the file in place.
    """
    with open(lowercase__, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks entirely.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"""Overwriting {lowercase__}.""")
            with open(lowercase__, "w") as f:
                f.write("\n".join(main_blocks))
def UpperCAmelCase_ ( lowercase__=True ):
    """Sort imports in every ``__init__.py`` under the package root.

    Fixes: the original walked ``lowercase__`` (the *check_only* boolean)
    instead of the package-root constant, and each failure REPLACED the
    failures list instead of being appended, so at most one failure was
    ever reported.
    """
    failures = []
    for root, _, files in os.walk(lowercase):  # ``lowercase`` is the module-level package root.
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=lowercase__)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"""Would overwrite {len(failures)} files, run `make style`.""")
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 708
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, 13, ... indefinitely.

    Fix: the loop read the undefined names ``a``/``b`` (the tuple assignment
    had been mangled into throwaway targets), raising NameError on first use.
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
    """Project Euler 25: index of the first Fibonacci term with *lowercase__* digits.

    Fix: the original called ``fibonacci_generator``, a name that is never
    defined in this file (the generator above is bound to a different name),
    so the function raised NameError.  The generator is inlined; the counting
    semantics (first yielded term is F(2) = 1, answer is 1-based Fibonacci
    index) are unchanged.
    """
    index = 2  # the first generated term below is F(2) = 1
    a, b = 0, 1
    while True:
        a, b = b, a + b
        if len(str(b)) >= lowercase__:
            return index
        index += 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 41
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase ( unittest.TestCase):
    """CPU-only smoke tests that launch the accelerate test scripts through
    ``debug_launcher`` (single process, no GPU required)."""

    # NOTE(review): both methods share the name ``lowercase_`` (an artifact of
    # the renaming applied to this file), so the second definition shadows the
    # first and only the ops test is collected — confirm against the upstream
    # accelerate test suite.
    def lowercase_ ( self) -> Union[str, Any]:
        """Launch the end-to-end test script under the debug launcher."""
        debug_launcher(test_script.main)

    def lowercase_ ( self) -> int:
        """Launch the distributed-ops test under the debug launcher."""
        debug_launcher(test_ops.main)
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase ( __a):
    """Configuration for Switch Transformers (sparse mixture-of-experts T5 variant).

    Fix: the original ``__init__`` declared every parameter as ``lowerCAmelCase_``
    (duplicate names — a SyntaxError) and bound each attribute to the throwaway
    local ``a_`` instead of ``self``; parameter names and attribute stores are
    restored from the configuration the body's reads (``self.num_layers``,
    ``self.feed_forward_proj``, ...) require.
    """
    # NOTE(review): the three class attributes below were all renamed to
    # ``__magic_name__`` by the obfuscation pass (upstream they are
    # ``model_type``, ``keys_to_ignore_at_inference`` and ``attribute_map``);
    # they are kept as-is because other obfuscated code may reference them.
    __magic_name__ : int = "switch_transformers"
    __magic_name__ : List[Any] = ["past_key_values"]
    __magic_name__ : Union[str, Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=3_2_1_2_8,
        d_model=7_6_8,
        d_kv=6_4,
        d_ff=2_0_4_8,
        expert_capacity=6_4,
        num_layers=1_2,
        num_sparse_encoder_layers=3,
        num_decoder_layers=1_2,
        num_sparse_decoder_layers=3,
        num_heads=1_2,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.0_1,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=3_2,
        relative_attention_max_distance=1_2_8,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.0_0_1,
        router_aux_loss_coef=0.0_0_1,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
| 41
| 0
|
'''simple docstring'''
import argparse
import struct
import unittest
class UpperCAmelCase :
    """Pure-Python SHA-256 (FIPS 180-4).

    Construct with a ``bytes`` message; the hex digest is available as
    ``self.hash``.

    Fix: the obfuscation pass had destroyed every local/attribute binding
    (all assigned to the throwaway ``a_``) and renamed the three methods to
    a single shadowing name while the code called ``self.preprocessing`` /
    ``self.final_hash`` / ``self.ror``; bindings and method names restored
    to a working SHA-256 (verified against ``hashlib.sha256``).
    """

    def __init__( self , lowerCAmelCase_) -> None:
        """Store the message, the initial hash values and round constants, then hash."""
        self.data = lowerCAmelCase_
        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes).
        self.hashes = [
            0x6a09_e667,
            0xbb67_ae85,
            0x3c6e_f372,
            0xa54f_f53a,
            0x510e_527f,
            0x9b05_688c,
            0x1f83_d9ab,
            0x5be0_cd19,
        ]
        # Initialize round constants (first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes).
        self.round_constants = [
            0x428a_2f98,
            0x7137_4491,
            0xb5c0_fbcf,
            0xe9b5_dba5,
            0x3956_c25b,
            0x59f1_11f1,
            0x923f_82a4,
            0xab1c_5ed5,
            0xd807_aa98,
            0x1283_5b01,
            0x2431_85be,
            0x550c_7dc3,
            0x72be_5d74,
            0x80de_b1fe,
            0x9bdc_06a7,
            0xc19b_f174,
            0xe49b_69c1,
            0xefbe_4786,
            0x0fc1_9dc6,
            0x240c_a1cc,
            0x2de9_2c6f,
            0x4a74_84aa,
            0x5cb0_a9dc,
            0x76f9_88da,
            0x983e_5152,
            0xa831_c66d,
            0xb003_27c8,
            0xbf59_7fc7,
            0xc6e0_0bf3,
            0xd5a7_9147,
            0x06ca_6351,
            0x1429_2967,
            0x27b7_0a85,
            0x2e1b_2138,
            0x4d2c_6dfc,
            0x5338_0d13,
            0x650a_7354,
            0x766a_0abb,
            0x81c2_c92e,
            0x9272_2c85,
            0xa2bf_e8a1,
            0xa81a_664b,
            0xc24b_8b70,
            0xc76c_51a3,
            0xd192_e819,
            0xd699_0624,
            0xf40e_3585,
            0x106a_a070,
            0x19a4_c116,
            0x1e37_6c08,
            0x2748_774c,
            0x34b0_bcb5,
            0x391c_0cb3,
            0x4ed8_aa4a,
            0x5b9c_ca4f,
            0x682e_6ff3,
            0x748f_82ee,
            0x78a5_636f,
            0x84c8_7814,
            0x8cc7_0208,
            0x90be_fffa,
            0xa450_6ceb,
            0xbef9_a3f7,
            0xc671_78f2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing( lowerCAmelCase_) -> bytes:
        """Pad the message to a multiple of 64 bytes and append its bit length."""
        padding = b"\x80" + (b"\x00" * (6_3 - (len(lowerCAmelCase_) + 8) % 6_4))
        big_endian_integer = struct.pack(">Q", (len(lowerCAmelCase_) * 8))
        return lowerCAmelCase_ + padding + big_endian_integer

    def final_hash( self) -> None:
        """Run the compression function on every 64-byte block; set ``self.hash``."""
        self.blocks = [
            self.preprocessed_data[x : x + 6_4]
            for x in range(0, len(self.preprocessed_data), 6_4)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers.
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers (message schedule)
            words += [0] * 4_8
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 6_4):
                if index > 1_5:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 1_5], 7)
                        ^ self.ror(words[index - 1_5], 1_8)
                        ^ (words[index - 1_5] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2], 1_7)
                        ^ self.ror(words[index - 2], 1_9)
                        ^ (words[index - 2] >> 1_0)
                    )
                    words[index] = (
                        words[index - 1_6] + sa + words[index - 7] + sb
                    ) % 0x1_0000_0000
                # Compression
                sa = self.ror(e, 6) ^ self.ror(e, 1_1) ^ self.ror(e, 2_5)
                ch = (e & f) ^ ((~e & 0xffff_ffff) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                sb = self.ror(a, 2) ^ self.ror(a, 1_3) ^ self.ror(a, 2_2)
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values (add the compressed chunk to the current hash).
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror( self , lowerCAmelCase_ , lowerCAmelCase_a) -> int:
        """Right-rotate the 32-bit *lowerCAmelCase_* by *lowerCAmelCase_a* bits."""
        return 0xffff_ffff & (lowerCAmelCase_ << (3_2 - lowerCAmelCase_a)) | (lowerCAmelCase_ >> lowerCAmelCase_a)
class UpperCAmelCase ( unittest.TestCase):
    """Verify the pure-Python SHA-256 implementation against ``hashlib``."""

    def lowercase_ ( self) -> None:
        """Digest of a short ASCII string matches ``hashlib.sha256``.

        Fix: the original compared against ``hashlib.shaaaa(...)``, which does
        not exist in the standard library (the real API is ``hashlib.sha256``),
        so the test raised AttributeError instead of checking anything.
        """
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())
def UpperCAmelCase_ ( ):
    """CLI entry point: run the module doctests, then hash a string or a file.

    Fixes: the original bound every local to the throwaway ``a_`` and finally
    hashed the undefined name ``lowercase__``; the file handle is now closed
    via a context manager.
    """
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase_ ( lowercase__ ):
    """Load ``<lowercase__>/all_results.json`` and return it as a dict.

    Raises ValueError when the file does not exist.

    Fixes: the original tested/opened the *directory* argument instead of the
    joined json path (so it always tried to ``open()`` a directory), left an
    unused empty-dict local, and did not use a context manager for the file.
    """
    path = os.path.join(lowercase__, "all_results.json")
    if not os.path.exists(path):
        raise ValueError(f"""can't find {path}""")
    with open(path, "r") as f:
        return json.load(f)
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
    """TPU end-to-end tests that launch example scripts through torch_xla's
    ``xla_spawn`` launcher (8 cores)."""

    # NOTE(review): locals in both tests were renamed to the throwaway ``a_``
    # by the obfuscation pass — the temp dir, the argv list and the timestamps
    # are never bound to the names (``tmp_dir``, ``lowerCAmelCase_``,
    # ``start``/``end``, ``result``) read afterwards; restore from the
    # upstream test before running.
    def lowercase_ ( self) -> List[Any]:
        """Fine-tune run_glue.py for 10 steps on MRPC fixtures; check accuracy >= 0.75
        and that the run finishes in under 500 seconds."""
        import xla_spawn

        a_ =self.get_auto_remove_tmp_dir()
        a_ =f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
            a_ =time()
            xla_spawn.main()
            a_ =time()
            a_ =get_results(lowerCAmelCase_)
            self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 5_0_0)

    def lowercase_ ( self) -> Tuple:
        """Smoke-test the trainer TPU test script itself under xla_spawn."""
        import xla_spawn

        a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
            xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowercase = None
try:
import msvcrt
except ImportError:
lowercase = None
try:
import fcntl
except ImportError:
lowercase = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowercase = OSError
# Data
# ------------------------------------------------
lowercase = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowercase = '''3.0.12'''
lowercase = None
def UpperCAmelCase_ ( ):
    """Return the module-level logger, creating and caching it lazily.

    Fix: the original bound the logger to a throwaway local and returned the
    global ``_logger``, which is never initialised under that name in this
    file — so the function raised NameError (or returned None) on every call.
    """
    global _logger
    # ``globals().get`` tolerates the cache name not existing yet.
    _logger = globals().get("_logger") or logging.getLogger(__name__)
    return _logger
class UpperCAmelCase ( __a):
    """Raised when a file lock could not be acquired within the timeout.

    Fix: ``__init__`` assigned the undefined name ``lock_file`` to a throwaway
    local instead of storing ``self.lock_file``, and ``__str__`` returned the
    undefined ``temp`` — both raised NameError.
    """

    def __init__( self , lowerCAmelCase_) -> None:
        # Path of the lock file we failed to acquire (read back by __str__).
        self.lock_file = lowerCAmelCase_
        return None

    def __str__( self) -> str:
        return f"""The file lock '{self.lock_file}' could not be acquired."""
class UpperCAmelCase :
    """Context-manager proxy returned by ``acquire()``: entering yields the lock,
    exiting releases it exactly once.

    Fix: ``__init__`` assigned the undefined name ``lock`` to a throwaway local
    instead of storing ``self.lock``, so ``__enter__``/``__exit__`` raised.
    """

    def __init__( self , lowerCAmelCase_) -> None:
        self.lock = lowerCAmelCase_
        return None

    def __enter__( self):
        return self.lock

    def __exit__( self , lowerCAmelCase_ , lowerCAmelCase_a , lowerCAmelCase_b):
        self.lock.release()
        return None
class UpperCAmelCase :
    """Abstract base class for a file lock (filelock 3.0.12 lineage, per the
    ``__version__`` above).  Platform subclasses implement ``_acquire`` /
    ``_release``; this class provides timeouts, reentrancy counting and the
    context-manager protocol.

    Fix: every attribute store and local binding had been destroyed by the
    renaming pass (all assigned to the throwaway ``a_``), so no state was ever
    kept on ``self``; names are restored to match the reads in this class and
    its subclasses (``self._lock_file``, ``self._lock_file_fd``, ...).
    """

    def __init__( self , lowerCAmelCase_ , timeout=-1 , max_filename_length=None) -> None:
        """Create a lock object for the file at *lowerCAmelCase_*."""
        max_filename_length = max_filename_length if max_filename_length is not None else 2_5_5
        # Hash the filename if it's too long for the filesystem.
        lock_file = self.hash_filename_if_too_long(lowerCAmelCase_, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.  Not None only while the object holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism: acquire() increments it, release() only really releases
        # when it drops back to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file( self):
        """Path of the underlying lock file."""
        return self._lock_file

    @property
    def timeout( self):
        """Default timeout (seconds) used by acquire()."""
        return self._timeout

    @timeout.setter
    def timeout( self , lowerCAmelCase_) -> None:
        self._timeout = float(lowerCAmelCase_)
        return None

    def _acquire( self):
        """Platform-specific lock grab; must set self._lock_file_fd on success."""
        raise NotImplementedError()

    def _release( self):
        """Platform-specific lock release; must clear self._lock_file_fd."""
        raise NotImplementedError()

    @property
    def is_locked( self):
        """True while this object holds the lock."""
        return self._lock_file_fd is not None

    def acquire( self , lowerCAmelCase_=None , poll_intervall=0.0_5):
        """Acquire the lock, polling every *poll_intervall* seconds.

        Raises Timeout when *lowerCAmelCase_* (or the default timeout) elapses;
        a negative timeout waits forever.  Returns a proxy usable in ``with``.
        """
        timeout = lowerCAmelCase_
        if timeout is None:
            timeout = self.timeout
        # Increment the counter right at the beginning; undone on failure.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release( self , lowerCAmelCase_=False) -> None:
        """Decrement the nesting counter; really release at 0 or when forced."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or lowerCAmelCase_:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"""Lock {lock_id} released on {lock_filename}""")
        return None

    def __enter__( self):
        self.acquire()
        return self

    def __exit__( self , lowerCAmelCase_ , lowerCAmelCase_a , lowerCAmelCase_b):
        self.release()
        return None

    def __del__( self) -> None:
        # Force-release on garbage collection so a dropped lock never leaks.
        self.release(lowerCAmelCase_=True)
        return None

    def hash_filename_if_too_long( self , lowerCAmelCase_ , lowerCAmelCase_a) -> str:
        """Shorten *lowerCAmelCase_* to at most *lowerCAmelCase_a* chars by hashing
        the basename, keeping a recognisable prefix and a ``.lock`` suffix."""
        filename = os.path.basename(lowerCAmelCase_)
        if len(filename) > lowerCAmelCase_a and lowerCAmelCase_a > 0:
            dirname = os.path.dirname(lowerCAmelCase_)
            hashed_filename = str(hash(filename))
            new_filename = filename[: lowerCAmelCase_a - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return lowerCAmelCase_
class UpperCAmelCase ( __a):
    """Hard file lock for Windows, using ``msvcrt.locking``.

    Fix: ``__init__`` declared duplicate parameter names (a SyntaxError) and
    the fd bindings in ``_acquire``/``_release`` were destroyed; restored to
    the filelock 3.0.12 implementation this file vendors.
    """

    def __init__( self , lowerCAmelCase_ , timeout=-1 , max_filename_length=None) -> None:
        from .file_utils import relative_to_absolute_path
        super().__init__(lowerCAmelCase_, timeout=timeout, max_filename_length=max_filename_length)
        # The extended-length path prefix lets the lock file exceed MAX_PATH.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire( self) -> None:
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release( self) -> None:
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UpperCAmelCase ( __a):
    """Hard file lock for Unix systems, using ``fcntl.flock``.

    Fix: ``__init__`` declared duplicate parameter names (a SyntaxError) and
    the fd bindings in ``_acquire``/``_release`` were destroyed; restored to
    the filelock 3.0.12 implementation this file vendors.
    """

    def __init__( self , lowerCAmelCase_ , timeout=-1 , max_filename_length=None) -> None:
        # The lock file name cannot exceed the filesystem's maximum name length.
        max_filename_length = os.statvfs(os.path.dirname(lowerCAmelCase_)).f_namemax
        super().__init__(lowerCAmelCase_, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire( self) -> None:
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release( self) -> None:
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class UpperCAmelCase ( __a):
    """Soft file lock: merely watches the existence of the lock file, so it
    works on any filesystem (NFS included) but offers weaker guarantees.

    Fix: the fd bindings in ``_acquire`` were destroyed by the renaming pass
    (``fd`` was never stored), so the lock could never be held or released.
    """

    def _acquire( self) -> None:
        # O_EXCL makes creation atomic: it fails if the lock file already exists.
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release( self) -> None:
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
lowercase = None
if msvcrt:
lowercase = WindowsFileLock
elif fcntl:
lowercase = UnixFileLock
else:
lowercase = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
    """Configuration for ALBERT models.

    Fix: the original ``__init__`` declared every parameter as
    ``lowerCAmelCase_`` (duplicate names — a SyntaxError) and bound each
    attribute to a throwaway local instead of ``self``; parameter names are
    restored from the defaults (which match the canonical ALBERT config) and
    every value is now actually stored on the instance.
    """
    __magic_name__ : int = "albert"

    def __init__(
        self,
        vocab_size=3_0_0_0_0,
        embedding_size=1_2_8,
        hidden_size=4_0_9_6,
        num_hidden_layers=1_2,
        num_hidden_groups=1,
        num_attention_heads=6_4,
        intermediate_size=1_6_3_8_4,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class UpperCAmelCase ( __a):
    """ONNX export configuration for ALBERT: declares the dynamic axes of each
    model input."""

    @property
    def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
        """Return the input-name → dynamic-axes mapping for ONNX export.

        Fix: the axes dict was bound to a throwaway local while the return
        statement read the undefined name ``dynamic_axis`` — a NameError on
        every access.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 41
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class UpperCAmelCase ( __a):
    """Configuration for DeBERTa-v2 models.

    Fix: the original ``__init__`` declared every parameter as
    ``lowerCAmelCase_`` (duplicate names — a SyntaxError) and bound each
    attribute to a throwaway local instead of ``self``; parameter names are
    restored from the defaults (which match the canonical DeBERTa-v2 config)
    and every value is now actually stored on the instance.
    """
    __magic_name__ : Any = "deberta-v2"

    def __init__(
        self,
        vocab_size=1_2_8_1_0_0,
        hidden_size=1_5_3_6,
        num_hidden_layers=2_4,
        num_attention_heads=2_4,
        intermediate_size=6_1_4_4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=0,
        initializer_range=0.0_2,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: accept a '|'-separated string for pos_att_type.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class UpperCAmelCase ( __a):
    """ONNX export configuration for DeBERTa-v2.

    Fixes: the dynamic-axes dict was bound to a throwaway local while the
    return statements read the undefined ``dynamic_axis``, and
    ``generate_dummy_inputs`` declared duplicate ``lowerCAmelCase_``
    parameters (a SyntaxError); names restored from the canonical config.
    """

    @property
    def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
        """Return the input-name → dynamic-axes mapping; token_type_ids is only
        exported when the model actually uses a token-type vocabulary."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)])
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def lowercase_ ( self) -> int:
        """Minimum ONNX opset required by the exported graph."""
        return 1_2

    def lowercase_ ( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 4_0 , image_height = 4_0 , tokenizer = None , ) -> Mapping[str, Any]:
        """Build dummy inputs for export, dropping token_type_ids when unused."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
    """Return the maximum contiguous-subarray sum of *lowercase__* (Kadane).

    Raises ValueError on None or an empty sequence.

    Fix: the body read the undefined name ``nums`` and passed the whole
    sequence into ``max()`` alongside integers (a TypeError); restored to the
    standard Kadane recurrence.
    """
    if lowercase__ is None or not lowercase__:
        raise ValueError("Input sequence should not be empty")
    best = current = lowercase__[0]
    for num in lowercase__[1:]:
        # Either extend the running subarray or start fresh at this element.
        current = max(num, current + num)
        best = max(best, current)
    return best
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
    """Return True iff the integer *lowercase__* uses each digit 1-9 exactly once.

    Fix: ``len``/``set`` were applied to the int itself instead of its string
    form (a TypeError on every call).
    """
    digits = str(lowercase__)
    return len(digits) == 9 and set(digits) == set("123456789")
def UpperCAmelCase_ ( ):
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Fix: the original called ``is_9_pandigital``, a name never defined in this
    file (the helper above is bound to a different name), so the search raised
    NameError; the pandigital check is inlined.
    """
    def _is_9_pandigital(num):
        digits = str(num)
        return len(digits) == 9 and set(digits) == set("123456789")

    # 4-digit base concatenated with 2*base: candidate = base * 100002.
    for base_num in range(9_9_9_9, 4_9_9_9, -1):
        candidate = 1_0_0_0_0_2 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    # 3-digit base concatenated with 2*base and 3*base: candidate = base * 1002003.
    for base_num in range(3_3_3, 9_9, -1):
        candidate = 1_0_0_2_0_0_3 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 713
|
'''simple docstring'''
import os
from math import logaa
def UpperCAmelCase_ ( lowercase__ = "base_exp.txt" ):
    """Project Euler 99: 1-based line number of the ``base,exp`` pair with the
    greatest ``base ** exp``, comparing via ``exp * log10(base)``.

    Fixes: the tuple was unpacked into the same throwaway name (losing the
    base), the comparison read the undefined ``x``, the data file was resolved
    relative to the *filename* instead of the script's directory, the file was
    never closed, and the module-level ``from math import logaa`` names a
    function that does not exist — ``log10`` is imported locally instead.
    """
    from math import log10

    largest = 0.0
    result = 0
    # Resolve relative to this script's directory; an absolute argument passes
    # through unchanged because os.path.join discards the left side then.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)), lowercase__)
    with open(path) as data_file:
        for i, line in enumerate(data_file):
            base, exponent = map(int, line.split(","))
            value = exponent * log10(base)
            if value > largest:
                largest = value
                result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 41
| 0
|
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class UpperCAmelCase ( unittest.TestCase):
    """Unit tests for the backbone utilities: alignment of ``out_features`` /
    ``out_indices`` and the ``BackboneMixin`` property behaviour."""

    # NOTE(review): throughout this class the obfuscation pass replaced the
    # intended locals/arguments with ``a_`` and ``lowerCAmelCase_`` (e.g. the
    # stage-name list and expected-exception types), so several calls below
    # reference names that are never bound as intended; restore from the
    # upstream transformers test before running.
    def lowercase_ ( self) -> Dict:
        """get_aligned_output_features_output_indices: defaults, and alignment
        when only one of features/indices is given (incl. negative indices)."""
        a_ =["a", "b", "c"]
        # Defaults to last layer if both are None
        a_ , a_ =get_aligned_output_features_output_indices(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
        self.assertEqual(lowerCAmelCase_ , ["c"])
        self.assertEqual(lowerCAmelCase_ , [2])
        # Out indices set to match out features
        a_ , a_ =get_aligned_output_features_output_indices(["a", "c"] , lowerCAmelCase_ , lowerCAmelCase_)
        self.assertEqual(lowerCAmelCase_ , ["a", "c"])
        self.assertEqual(lowerCAmelCase_ , [0, 2])
        # Out features set to match out indices
        a_ , a_ =get_aligned_output_features_output_indices(lowerCAmelCase_ , [0, 2] , lowerCAmelCase_)
        self.assertEqual(lowerCAmelCase_ , ["a", "c"])
        self.assertEqual(lowerCAmelCase_ , [0, 2])
        # Out features selected from negative indices
        a_ , a_ =get_aligned_output_features_output_indices(lowerCAmelCase_ , [-3, -1] , lowerCAmelCase_)
        self.assertEqual(lowerCAmelCase_ , ["a", "c"])
        self.assertEqual(lowerCAmelCase_ , [-3, -1])

    def lowercase_ ( self) -> Optional[Any]:
        """verify_out_features_out_indices: every invalid combination raises,
        and a fully valid combination passes."""
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , lowerCAmelCase_)
        # Out features must be a list
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(lowerCAmelCase_ , 0 , ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(lowerCAmelCase_ , (0, 1) , ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(lowerCAmelCase_):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"])

    def lowercase_ ( self) -> Tuple:
        """BackboneMixin keeps out_features and out_indices consistent when
        either one is reassigned."""
        a_ =BackboneMixin()
        a_ =["a", "b", "c"]
        a_ =["a", "c"]
        a_ =[0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        a_ =["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"])
        self.assertEqual(backbone.out_indices , [0, 1])
        a_ =[-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 714
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_(a, b):
    """Extended Euclidean algorithm.

    Returns a pair ``(x, y)`` such that ``a * x + b * y == gcd(a, b)``.

    Fixes over the original: both parameters were declared under the same
    mangled name (a SyntaxError) — restored to the ``a``/``b`` the body
    already uses — and the recursive call referenced an undefined alias
    instead of this function.
    """
    if b == 0:
        return (1, 0)
    # Recurse on (b, a mod b), then back-substitute the Bezout coefficients.
    x, y = UpperCAmelCase_(b, a % b)
    k = a // b
    return (y, x - k * y)
def UpperCAmelCase_(n_a, r_a, n_b, r_b):
    """Chinese Remainder Theorem for two coprime moduli.

    Returns the unique ``n`` in ``[0, n_a * n_b)`` with ``n % n_a == r_a``
    and ``n % n_b == r_b``.

    Fixes over the original: all four parameters were declared under one
    duplicated mangled name (a SyntaxError) and the body read undefined
    locals; names are restored from the algorithm's structure.

    NOTE(review): relies on a module-level ``extended_euclid`` helper, which
    is not defined under that name in this file as written — confirm the
    helper's actual name before use.
    """
    x, y = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    # Normalise into [0, m) even when n is negative.
    return (n % m + m) % m
def UpperCAmelCase_(a, n):
    """Return ``b`` with ``(a * b) % n == 1`` — the modular inverse of ``a``
    modulo ``n`` (requires ``gcd(a, n) == 1``).

    Fixes over the original: both parameters were declared under the same
    mangled name (a SyntaxError) and the body read undefined locals.

    NOTE(review): relies on a module-level ``extended_euclid`` helper, which
    is not defined under that name in this file as written — confirm before
    use.
    """
    b, x = extended_euclid(a, n)
    if b < 0:
        # Shift a negative coefficient into the canonical range [0, n).
        b = (b % n + n) % n
    return b
def UpperCAmelCase_(n_a, r_a, n_b, r_b):
    """Chinese Remainder Theorem via modular inverses (same contract as the
    ``extended_euclid``-based variant above).

    Fixes over the original: all four parameters were declared under one
    duplicated mangled name (a SyntaxError) and the body read undefined
    locals.

    NOTE(review): relies on a module-level ``invert_modulo`` helper, which is
    not defined under that name in this file as written — confirm before use.
    """
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod

    # NOTE(review): these doctest targets use the pre-mangling function
    # names, which are not defined in this file as written — confirm.
    testmod(name='''chinese_remainder_theorem''', verbose=True)
    testmod(name='''chinese_remainder_theorem2''', verbose=True)
    testmod(name='''invert_modulo''', verbose=True)
    testmod(name='''extended_euclid''', verbose=True)
| 41
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_(lowercase__):
    """Return True iff the square matrix equals its own conjugate transpose
    (i.e. is Hermitian).

    Fix over the original: the body compared against an undefined name
    ``matrix`` — the parameter itself is the matrix under test.
    """
    return np.array_equal(lowercase__, lowercase__.conjugate().T)
def UpperCAmelCase_(a, v):
    """Rayleigh quotient ``(v* A v) / (v* v)`` for a Hermitian matrix ``a``
    and a non-zero column vector ``v``.

    Fix over the original: both parameters were declared under the same
    mangled name (a SyntaxError); names are restored to match the body's
    ``v``/``v_star`` usage.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def UpperCAmelCase_ ( ):
    """Smoke-test the Rayleigh-quotient helpers on a complex and a real
    Hermitian matrix.

    NOTE(review): the body references ``is_hermitian``, ``rayleigh_quotient``
    and the mangled placeholders ``lowercase__``/``a``, none of which are
    defined here as written — the original identifiers must be restored
    before this can run.
    """
    a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    a_ =np.array([[1], [2], [3]] )
    assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(lowercase__ , lowercase__ ) )
    a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `tests` is not defined under this name here — confirm.
    tests()
| 41
| 0
|
'''simple docstring'''
def UpperCAmelCase_(lowercase__):
    """Return True iff ``lowercase__`` is an arithmetic progression.

    A single-element list is trivially arithmetic.

    Raises:
        ValueError: if the input is not a list, or is an empty list.

    Fixes over the original: ``isinstance(x, x)`` (always a TypeError) is
    corrected to a ``list`` check, and the body indexed an undefined name
    ``series`` instead of the parameter.
    """
    if not isinstance(lowercase__, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(lowercase__) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(lowercase__) == 1:
        return True
    common_diff = lowercase__[1] - lowercase__[0]
    for index in range(len(lowercase__) - 1):
        if lowercase__[index + 1] - lowercase__[index] != common_diff:
            return False
    return True
def UpperCAmelCase_(lowercase__):
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if the input is not a list, or is an empty list.

    Fixes over the original: ``isinstance(x, x)`` (always a TypeError) is
    corrected to a ``list`` check, the body iterated an undefined name
    ``series``, and the manual accumulation loop is replaced with ``sum``.
    """
    if not isinstance(lowercase__, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(lowercase__) == 0:
        raise ValueError("Input list must be a non empty list")
    return sum(lowercase__) / len(lowercase__)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest

    doctest.testmod()
| 716
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_(board, row, column):
    """Return True iff a queen can be placed at ``(row, column)`` without
    being attacked along its row, column, or the two upward diagonals.

    Fix over the original: all three parameters were declared under the same
    mangled name (a SyntaxError); names are restored to the ``board``/``row``/
    ``column`` the body already indexes.
    """
    for i in range(len(board)):
        if board[row][i] == 1:  # same row
            return False
    for i in range(len(board)):
        if board[i][column] == 1:  # same column
            return False
    # Upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def UpperCAmelCase_(board, row):
    """Backtracking step for N-queens: try each column in ``row``, recurse,
    and record/print every complete placement.

    Returns True when a complete solution was just appended, False after the
    loop exhausts the row.

    Fixes over the original: both parameters were declared under the same
    mangled name (a SyntaxError) and board cells were assigned to a throwaway
    local instead of the board.

    NOTE(review): relies on module-level ``is_safe``/``printboard``/``solve``
    helpers and a global ``solution`` list, none of which are defined under
    those names in this file as written — confirm before running.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1  # place queen, explore, then backtrack
            solve(board, row + 1)
            board[row][i] = 0
    return False
def UpperCAmelCase_(board):
    """Pretty-print ``board``: 'Q ' for a queen (cell == 1), '. ' otherwise,
    one line per row.

    Fix over the original: the body indexed an undefined name ``board``
    while the parameter carried a mangled placeholder name.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
lowercase = 8
# NOTE(review): the names below (`n`, `board`, `solve`, `solution`) are not
# defined in this file as written — the driver was mangled along with the
# definitions above and will raise NameError; confirm the intended names.
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 41
| 0
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class UpperCAmelCase ( __a):
    """Configuration for the M-CTC-T speech-recognition model.

    Holds the transformer-encoder hyper-parameters and the convolutional
    feature-extraction front-end settings, and validates that the number of
    declared conv kernels matches ``num_conv_layers``.

    Fix over the original: ``__init__`` declared every parameter under one
    duplicated mangled name (a SyntaxError). The names below are restored
    from the body's assignment order and the upstream ``MCTCTConfig``
    signature — NOTE(review): confirm against the upstream definition.
    """

    __magic_name__ : Tuple = "mctct"

    def __init__(
        self,
        vocab_size=8_0_6_5,
        hidden_size=1_5_3_6,
        num_hidden_layers=3_6,
        intermediate_size=6_1_4_4,
        num_attention_heads=4,
        attention_head_dim=3_8_4,
        max_position_embeddings=9_2_0,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.0_2,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=8_0,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ) -> List[Any]:
        """Populate the config; special-token ids and extra kwargs are
        forwarded to the base class."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder hyper-parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Convolutional front end.
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
                f"""`config.num_conv_layers = {self.num_conv_layers}`.""")
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_(masked_input, model, tokenizer, topk=5):
    """Fill the single ``<mask>`` slot in ``masked_input`` with the model's
    ``topk`` most likely tokens.

    Returns a list of ``(filled_sentence, probability, predicted_token)``
    tuples, most likely first.

    Fixes over the original: four parameters were declared under one
    duplicated mangled name (a SyntaxError) and the body read undefined
    locals; names are restored from the body's usage.

    NOTE(review): the ``add_special_tokens`` flag was mangled away; the
    upstream script passes True — confirm.
    """
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece marks word boundaries with U+2581; map it back to a space.
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
# Demo driver: download the pretrained CamemBERT tokenizer and masked-LM
# weights (network access required) and fill a single <mask> slot.
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
# NOTE(review): `model`, `masked_input`, `tokenizer` and `fill_mask` are not
# defined under these names here — the driver was mangled; confirm.
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
    """Project Euler 42: count the "triangle words" in ``words.txt``.

    A word's value is the sum of its letters' alphabetical positions
    (``ord(letter) - 64`` for upper-case letters); a word counts when that
    value appears in the precomputed module-level ``TRIANGULAR_NUMBERS``.

    Fixes over the original: the script directory was derived from an
    undefined name instead of ``__file__``, and the letter sum called
    ``ord`` on that same undefined name instead of the loop variable ``x``.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_path = os.path.join(script_dir, "words.txt")
    with open(words_path) as f:
        line = f.readline()
    words = [word.strip("\"") for word in line.strip("\r\n").split(",")]
    triangle_values = [
        value
        for value in [sum(ord(x) - 6_4 for x in word) for word in words]
        if value in TRIANGULAR_NUMBERS
    ]
    return len(triangle_values)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under this name here — confirm.
    print(solution())
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase :
    """Builds miniature MegatronBERT configs/inputs and asserts the output
    shapes of the base model and every task head; consumed by the test class
    below.

    NOTE(review): the source mangling collapsed all method parameters into
    one duplicated placeholder (``lowerCAmelCase_`` — a SyntaxError) and
    result names into ``a_``, while the bodies still read the original names
    (``parent``, ``batch_size``, ``config``, ...); the original identifiers
    must be restored before this class can run.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1_3 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=9_9 , lowerCAmelCase_=6_4 , lowerCAmelCase_=3_2 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=3_7 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=1_6 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Optional[int]:
        """Record the tiny-model hyper-parameters used by every check."""
        a_ =parent
        a_ =batch_size
        a_ =seq_length
        a_ =is_training
        a_ =use_input_mask
        a_ =use_token_type_ids
        a_ =use_labels
        a_ =vocab_size
        a_ =hidden_size
        a_ =embedding_size
        a_ =num_hidden_layers
        a_ =num_attention_heads
        a_ =intermediate_size
        a_ =hidden_act
        a_ =hidden_dropout_prob
        a_ =attention_probs_dropout_prob
        a_ =max_position_embeddings
        a_ =type_vocab_size
        a_ =type_sequence_label_size
        a_ =initializer_range
        a_ =num_labels
        a_ =num_choices
        a_ =scope

    def lowercase_ ( self) -> Optional[Any]:
        """Build random ids/masks/labels plus a config for one forward pass."""
        a_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a_ =None
        if self.use_input_mask:
            a_ =random_attention_mask([self.batch_size, self.seq_length])
        a_ =None
        if self.use_token_type_ids:
            a_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        a_ =None
        a_ =None
        a_ =None
        if self.use_labels:
            a_ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            a_ =ids_tensor([self.batch_size] , self.num_choices)
        a_ =self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowercase_ ( self) -> List[str]:
        """Return a MegatronBertConfig populated from the tester fields."""
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Dict:
        """Base model: check last_hidden_state and pooler_output shapes."""
        a_ =MegatronBertModel(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
        a_ =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
        a_ =model(lowerCAmelCase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
        """Masked-LM head: logits shaped (batch, seq, vocab)."""
        a_ =MegatronBertForMaskedLM(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Union[str, Any]:
        """Causal-LM head: logits shaped (batch, seq, vocab)."""
        a_ =MegatronBertForCausalLM(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> int:
        """Next-sentence-prediction head: 2-way logits per example."""
        a_ =MegatronBertForNextSentencePrediction(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> int:
        """Pretraining heads: MLM logits plus 2-way seq-relationship logits."""
        a_ =MegatronBertForPreTraining(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
        """QA head: per-token start/end logits."""
        a_ =MegatronBertForQuestionAnswering(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Sequence-classification head: one logit row per example."""
        a_ =self.num_labels
        a_ =MegatronBertForSequenceClassification(lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
        """Token-classification head: per-token label logits."""
        a_ =self.num_labels
        a_ =MegatronBertForTokenClassification(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
        """Multiple-choice head: inputs are tiled across the choice axis."""
        a_ =self.num_choices
        a_ =MegatronBertForMultipleChoice(config=lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.eval()
        a_ =input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        a_ =token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        a_ =input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        a_ =model(
            lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def lowercase_ ( self) -> List[str]:
        """Repackage prepare_config_and_inputs() into the common dict format."""
        a_ =self.prepare_config_and_inputs()
        (
            (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) , (
                a_
            ) ,
        ) =config_and_inputs
        a_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __a , __a , unittest.TestCase):
    """Standard ModelTester/Pipeline mixin suite for MegatronBERT: wires the
    tester above into the common config/model/pipeline checks.

    NOTE(review): the mixin base classes and several class-attribute names
    were mangled to placeholders (``__a``, duplicated ``__magic_name__``),
    and method parameters were collapsed into one duplicated
    ``lowerCAmelCase_`` name — restore the original identifiers before
    running.
    """

    # All model classes exercised by the common tests (torch only).
    __magic_name__ : int = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline mixin.
    __magic_name__ : Dict = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ : Optional[int] = True
    # test_resize_embeddings = False
    __magic_name__ : str = False

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False) -> str:
        """Add dummy label tensors for pretraining-style model classes."""
        a_ =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
        if return_labels:
            if model_class in get_values(lowerCAmelCase_):
                a_ =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_)
                a_ =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_)
        return inputs_dict

    def lowercase_ ( self) -> Tuple:
        """Instantiate the model tester and the shared config tester."""
        a_ =MegatronBertModelTester(self)
        a_ =ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)

    def lowercase_ ( self) -> List[str]:
        """Run the common configuration round-trip tests."""
        self.config_tester.run_common_tests()

    def lowercase_ ( self) -> Optional[int]:
        """Base-model shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_)

    def lowercase_ ( self) -> int:
        """Masked-LM head shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_)

    def lowercase_ ( self) -> int:
        """Multiple-choice head shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_)

    def lowercase_ ( self) -> int:
        """Next-sentence-prediction head shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_)

    def lowercase_ ( self) -> List[str]:
        """Pretraining heads shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_)

    def lowercase_ ( self) -> Optional[Any]:
        """Question-answering head shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_)

    def lowercase_ ( self) -> str:
        """Sequence-classification head shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_)

    def lowercase_ ( self) -> List[Any]:
        """Token-classification head shape check."""
        a_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_)
def UpperCAmelCase_(lowercase__):
    """Wrap a nested list of token ids in a ``torch.long`` tensor on the test
    device.

    Fix over the original: the data list itself was also passed as the
    ``device=`` argument (a TypeError at runtime). ``torch_device`` from
    ``transformers.testing_utils`` — imported at the top of this file — is
    presumably the intended target; NOTE(review): confirm.
    """
    return torch.tensor(lowercase__, dtype=torch.long, device=torch_device)
# Tolerance for the element-wise float comparisons in the integration test.
lowercase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase):
    """Slow integration test: run the 345M Megatron-BERT checkpoint in fp16
    and compare the first 3x3 block of the output against reference values.

    NOTE(review): the mangling replaced many names with undefined
    placeholders (``lowerCAmelCase_``, ``a_``, ``_long_tensor``,
    ``output``...); restore the original identifiers before un-skipping.
    """

    @slow
    @unittest.skip("Model is not available.")
    def lowercase_ ( self) -> Tuple:
        """Forward one 9-token sentence and check shape + sampled values."""
        a_ ="nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            a_ =os.path.join(os.environ["MYDIR"] , lowerCAmelCase_)
        a_ =MegatronBertModel.from_pretrained(lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.half()
        a_ =_long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]])
        with torch.no_grad():
            a_ =model(lowerCAmelCase_)[0]
        a_ =torch.Size((1, 9, 1_0_2_4))
        self.assertEqual(output.shape , lowerCAmelCase_)
        a_ =[-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3):
            for jj in range(3):
                a_ =output[0, ii, jj]
                a_ =expected[3 * ii + jj]
                a_ ="ii={} jj={} a={} b={}".format(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
                self.assertTrue(math.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rel_tol=lowerCAmelCase_ , abs_tol=lowerCAmelCase_) , msg=lowerCAmelCase_)
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
    """Project Euler 42: count the "triangle words" in ``words.txt``.

    A word's value is the sum of its letters' alphabetical positions
    (``ord(letter) - 64`` for upper-case letters); a word counts when that
    value appears in the precomputed module-level ``TRIANGULAR_NUMBERS``.

    Fixes over the original: the script directory was derived from an
    undefined name instead of ``__file__``, and the letter sum called
    ``ord`` on that same undefined name instead of the loop variable ``x``.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_path = os.path.join(script_dir, "words.txt")
    with open(words_path) as f:
        line = f.readline()
    words = [word.strip("\"") for word in line.strip("\r\n").split(",")]
    triangle_values = [
        value
        for value in [sum(ord(x) - 6_4 for x in word) for word in words]
        if value in TRIANGULAR_NUMBERS
    ]
    return len(triangle_values)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under this name here — confirm.
    print(solution())
| 41
| 0
|
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase = False
lowercase = logging.get_logger(__name__)
lowercase = '''ybelkada/fonts'''
def UpperCAmelCase_ ( ):
    """Fail fast with ImportError when torch is installed but older than the
    1.11 minimum required by Pix2StructImageProcessor."""
    # Nothing to verify when torch is absent entirely.
    if not is_torch_available():
        return
    if is_torch_greater_or_equal_than_1_11:
        return
    raise ImportError(
        f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
        "Pix2StructImageProcessor. Please upgrade torch." )
def UpperCAmelCase_(image_tensor, patch_height, patch_width):
    """Cut a (C, H, W) image tensor into non-overlapping patches.

    Returns a tensor of shape
    ``(1, H // patch_height, W // patch_width, C * patch_height * patch_width)``.

    Fix over the original: all three parameters were declared under the same
    mangled name (a SyntaxError); names are restored to the
    ``image_tensor``/``patch_height``/``patch_width`` the body already uses.
    """
    requires_backends(UpperCAmelCase_, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    # unfold yields one flattened patch per column; stride == kernel, so the
    # patches tile the image without overlap.
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def UpperCAmelCase_(text, text_size=3_6, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None):
    """Render ``text`` (wrapped at 80 characters) onto a fresh PIL image
    sized to fit, with the given colors and padding.

    The font is taken from ``font_bytes``, then ``font_path``, then the
    ``Arial.TTF`` file of the hub fonts repo as a fallback.

    Fix over the original: the parameters were declared under one duplicated
    mangled name (a SyntaxError); names/defaults are restored from the body's
    usage and the upstream signature — NOTE(review): confirm upstream.
    """
    requires_backends(UpperCAmelCase_, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=8_0)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # `lowercase` is the module-level constant holding the hub fonts repo.
        font = hf_hub_download(lowercase, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def UpperCAmelCase_(image, header, **kwargs):
    """Render ``header`` as text and stack it above ``image``, both resized
    to a common width; returns the combined image as a numpy array in the
    input's channel layout.

    Fix over the original: the two parameters were declared under the same
    mangled name (a SyntaxError) and every call site read that ambiguous
    placeholder; names are restored from the body's usage.

    NOTE(review): calls a module-level ``render_text`` helper, which is not
    defined under that name in this file as written — confirm.
    """
    requires_backends(UpperCAmelCase_, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class UpperCAmelCase ( __a):
    """Pix2Struct-style image processor: optionally renders header text onto
    the image, normalizes it, and flattens it into a fixed-length sequence of
    patches with row/column ids prepended.

    NOTE(review): the processing methods are all named ``lowercase_`` (each
    definition shadows the previous one) and two signatures repeat the
    parameter name ``lowerCAmelCase_`` — a SyntaxError as written. The calls
    to ``self.extract_flattened_patches`` / ``self.normalize`` below show the
    intended method names; restore them before using this class. Assignment
    targets were likewise collapsed to ``a_`` — the names read back on the
    right-hand sides (``patch_size``, ``rows``, ``patches`` ...) show the
    intended bindings.
    """

    # Key of the tensor produced by preprocessing.
    __magic_name__ : int = ["flattened_patches"]

    def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 2_0_4_8 , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> None:
        """Store preprocessing defaults: normalize / convert-to-RGB flags,
        patch size, maximum patch count and VQA (header text) mode."""
        super().__init__(**lowerCAmelCase_)
        # Default to 16x16 patches when no patch size is given.
        a_ =patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
        a_ =do_normalize
        a_ =do_convert_rgb
        a_ =max_patches
        a_ =is_vqa

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> np.ndarray:
        """Extract up to ``max_patches`` flattened patches from one image,
        prepending 1-based row/column ids, and zero-pad to ``max_patches``.

        NOTE(review): duplicate parameter names — SyntaxError as written;
        intended signature appears to be ``(self, image, max_patches,
        patch_size, **kwargs)`` — confirm against the upstream processor.
        """
        requires_backends(self.extract_flattened_patches , "torch")
        _check_torch_version()
        # convert to torch
        a_ =to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.FIRST)
        a_ =torch.from_numpy(lowerCAmelCase_)
        a_ , a_ =patch_size["height"], patch_size["width"]
        a_ , a_ =get_image_size(lowerCAmelCase_)
        # maximize scale s.t. the resized image yields at most ``max_patches`` patches
        a_ =math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        a_ =max(min(math.floor(scale * image_height / patch_height) , lowerCAmelCase_) , 1)
        a_ =max(min(math.floor(scale * image_width / patch_width) , lowerCAmelCase_) , 1)
        a_ =max(num_feasible_rows * patch_height , 1)
        a_ =max(num_feasible_cols * patch_width , 1)
        a_ =torch.nn.functional.interpolate(
            image.unsqueeze(0) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=lowerCAmelCase_ , antialias=lowerCAmelCase_ , ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        a_ =torch_extract_patches(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
        a_ =patches.shape
        a_ =patches_shape[1]
        a_ =patches_shape[2]
        a_ =patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        a_ =patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        a_ =torch.arange(lowerCAmelCase_).reshape([rows, 1]).repeat(1 , lowerCAmelCase_).reshape([rows * columns, 1])
        a_ =torch.arange(lowerCAmelCase_).reshape([1, columns]).repeat(lowerCAmelCase_ , 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        a_ =row_ids.to(torch.floataa)
        a_ =col_ids.to(torch.floataa)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        a_ =torch.cat([row_ids, col_ids, patches] , -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        a_ =torch.nn.functional.pad(lowerCAmelCase_ , [0, 0, 0, max_patches - (rows * columns)]).float()
        a_ =to_numpy_array(lowerCAmelCase_)
        return result

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_) -> np.ndarray:
        """Per-image standardization: subtract the image mean and divide by its
        std (floored at 1/sqrt(num_pixels) to avoid division blow-up)."""
        if image.dtype == np.uinta:
            a_ =image.astype(np.floataa)
        # take mean across the whole `image`
        a_ =np.mean(lowerCAmelCase_)
        a_ =np.std(lowerCAmelCase_)
        a_ =max(lowerCAmelCase_ , 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , **lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> ImageInput:
        """Main preprocessing entry point: validate inputs, convert to RGB,
        render the VQA header (if any), normalize, extract flattened patches
        and build the attention mask.

        NOTE(review): duplicate parameter names — SyntaxError as written.
        """
        a_ =do_normalize if do_normalize is not None else self.do_normalize
        a_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        a_ =patch_size if patch_size is not None else self.patch_size
        a_ =max_patches if max_patches is not None else self.max_patches
        a_ =self.is_vqa
        if kwargs.get("data_format" , lowerCAmelCase_) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")
        a_ =make_list_of_images(lowerCAmelCase_)
        if not valid_images(lowerCAmelCase_):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            a_ =[convert_to_rgb(lowerCAmelCase_) for image in images]
        # All transformations expect numpy arrays.
        a_ =[to_numpy_array(lowerCAmelCase_) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            a_ =kwargs.pop("font_bytes" , lowerCAmelCase_)
            a_ =kwargs.pop("font_path" , lowerCAmelCase_)
            if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
                a_ =[header_text] * len(lowerCAmelCase_)
            a_ =[
                render_header(lowerCAmelCase_ , header_text[i] , font_bytes=lowerCAmelCase_ , font_path=lowerCAmelCase_)
                for i, image in enumerate(lowerCAmelCase_)
            ]
        if do_normalize:
            a_ =[self.normalize(image=lowerCAmelCase_) for image in images]
        # convert to torch tensor and permute
        a_ =[
            self.extract_flattened_patches(image=lowerCAmelCase_ , max_patches=lowerCAmelCase_ , patch_size=lowerCAmelCase_)
            for image in images
        ]
        # create attention mask in numpy: 1 for real (non-zero) patches, 0 for padding
        a_ =[(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]
        a_ =BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=lowerCAmelCase_)
        return encoded_outputs
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Logger / reproducibility setup for the suno/bark -> HF conversion script.
lowercase = logging.get_logger(__name__)
# Fixed seed so the old-vs-new model output comparison below is deterministic.
set_seed(770)
# Maps GPT-style weight-name fragments in the original Bark checkpoints to
# the corresponding module names in the HF Bark implementation.
lowercase = {
    '''c_attn''': '''att_proj''',
    '''c_proj''': '''out_proj''',
    '''c_fc''': '''in_proj''',
    '''transformer.''': '''''',
    '''h.''': '''layers.''',
    '''ln_1''': '''layernorm_1''',
    '''ln_2''': '''layernorm_2''',
    '''ln_f''': '''layernorm_final''',
    '''wpe''': '''position_embeds_layer''',
    '''wte''': '''input_embeds_layer''',
}
# Hub locations of the three Bark sub-model checkpoints (regular and small).
lowercase = {
    '''text_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text.pt''',
    },
    '''coarse_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse.pt''',
    },
    '''fine_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine.pt''',
    },
    '''text''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text_2.pt''',
    },
    '''coarse''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse_2.pt''',
    },
    '''fine''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine_2.pt''',
    },
}
# Local cache layout: ~/.cache (or $XDG_CACHE_HOME)/suno/bark_v0.
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
# NOTE(review): every constant above is bound to the same name ``lowercase``,
# so later assignments shadow earlier ones, and the helpers below refer to
# ``new_layer_name_dict`` / ``REMOTE_MODEL_PATHS`` / ``CACHE_DIR`` /
# ``default_cache_dir`` which are never bound — the original distinct names
# must be restored for the script to run.
def UpperCAmelCase_ ( model_type , use_small=False ):
    """Return the local cache path of a suno/bark checkpoint.

    NOTE(review): the original signature reused one name for both parameters
    (a SyntaxError); names restored from the call site
    ``_get_ckpt_path(model_type, use_small=use_small)``. The join base is
    presumed to be ``CACHE_DIR`` — confirm against the upstream script.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["file_name"] )


# The rest of the script refers to this helper by its conventional name.
_get_ckpt_path = UpperCAmelCase_
def UpperCAmelCase_ ( from_hf_path , file_name ):
    """Download ``file_name`` from the Hub repo ``from_hf_path`` into the
    local ``CACHE_DIR`` (created if necessary).

    NOTE(review): the original signature reused one name for both parameters
    (a SyntaxError); names restored from the keyword arguments below.
    """
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )


# Name used by ``_load_model`` below.
_download = UpperCAmelCase_
def UpperCAmelCase_ ( ckpt_path , device , use_small=False , model_type="text" ):
    """Load one suno/bark sub-model checkpoint and convert it into the
    matching HF Bark sub-model.

    Args:
        ckpt_path: path of the original ``.pt`` checkpoint; downloaded into
            ``CACHE_DIR`` when missing.
        device: ``map_location`` for ``torch.load`` and target of the model.
        use_small: select the "_small" remote checkpoint variant.
        model_type: one of ``"text"``, ``"coarse"``, ``"fine"``.

    Returns the converted HF model in eval mode.

    NOTE(review): the original signature reused one parameter name four times
    (a SyntaxError); names restored from the call site in ``load_model``.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info["repo_id"] , model_info["file_name"] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack: old checkpoints carry a single ``vocab_size``; split it
    # into the input/output sizes the HF configs expect.
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head" )
    model_args["hidden_size"] = model_args.pop("n_embd" )
    model_args["num_layers"] = model_args.pop("n_layer" )
    model_config = ConfigClass(**checkpoint["model_args"] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile prefix and rename GPT-style
    # layers to their HF equivalents.
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(extra_keys ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(missing_keys ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss""" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model


# Name used by ``load_model`` below.
_load_model = UpperCAmelCase_
def UpperCAmelCase_ ( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    """Convert one Bark sub-model, verify it against the original suno
    implementation on random inputs, and save it to
    ``pytorch_dump_folder_path``.

    NOTE(review): the original signature reused one parameter name three
    times (a SyntaxError); names restored from the __main__ call site.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , "cpu" , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError("initial and new outputs are not equal" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )


# Name used by the __main__ entry point.
load_model = UpperCAmelCase_
def UpperCAmelCase_ ( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    """Assemble a full ``BarkModel`` from three already-converted sub-models
    plus the Encodec codec, and push the result to the Hub.

    NOTE(review): the original signature reused one parameter name six times
    (a SyntaxError); names restored from the order of use in the body.
    """
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , "config.json" ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , "config.json" ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , "config.json" ) )
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    # Wire the sub-models into the composite model.
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and the parsed args to
    # ``lowercase`` while the statements below used ``parser``/``args``
    # (NameError); consistent names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase :
    """Bezier curve defined by a list of 2-D control points.

    NOTE(review): the three methods were all named ``lowercase_``, so only
    the last definition survived and the internal calls to
    ``self.basis_function`` / ``self.bezier_curve_function`` raised
    ``AttributeError``. The original method names are restored;
    ``lowercase_`` is kept as an alias of ``plot_curve`` (the method the old
    name resolved to).
    """

    def __init__( self , lowerCAmelCase_) -> List[Any]:
        """Store control points; degree = len(points) - 1 (degree 1 is a
        straight line)."""
        self.list_of_points = lowerCAmelCase_
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(lowerCAmelCase_) - 1

    def basis_function( self , lowerCAmelCase_) -> list[float]:
        """Return the Bernstein basis values at parameter ``t`` in [0, 1]."""
        assert 0 <= lowerCAmelCase_ <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree , i) * ((1 - lowerCAmelCase_) ** (self.degree - i)) * (lowerCAmelCase_**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values) , 5) == 1
        return output_values

    def bezier_curve_function( self , lowerCAmelCase_) -> tuple[float, float]:
        """Return the (x, y) point on the curve at parameter ``t``."""
        assert 0 <= lowerCAmelCase_ <= 1, "Time t must be between 0 and 1."
        basis = self.basis_function(lowerCAmelCase_)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis[i] * self.list_of_points[i][0]
            y += basis[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve( self , lowerCAmelCase_ = 0.0_1) -> Optional[int]:
        """Plot the curve (sampled at ``step_size`` intervals) and its control
        points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += lowerCAmelCase_
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree) , )
        plt.scatter(x , y , color="red" , label="Control Points")
        plt.legend()
        plt.show()

    # Backwards-compatible alias: externally, ``lowercase_`` previously
    # resolved to the last definition, which was the plotting method.
    lowercase_ = plot_curve
if __name__ == "__main__":
    import doctest

    # Demo: plot curves of increasing degree.
    # NOTE(review): ``BezierCurve`` is not defined in this module as written —
    # the class above was renamed to ``UpperCAmelCase`` and its plotting
    # method to ``lowercase_`` — so these calls raise NameError. Confirm the
    # intended bindings before running.
    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 721
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
    """Return True if ``lowercase__`` contains each digit 1-9 exactly once
    (i.e. is 1-through-9 pandigital)."""
    digits = str(lowercase__ )
    return len(digits ) == 9 and set(digits ) == set("123456789" )


# The solver below refers to this predicate by its conventional name, which
# was lost when the def was renamed.
is_9_pandigital = UpperCAmelCase_
def UpperCAmelCase_ ( ):
    """Return the largest 1-9 pandigital number formed as the concatenated
    product of an integer with (1, 2, ...) — Project Euler 38.

    ``100002 * base`` concatenates ``base`` and ``2*base`` for 4-digit bases;
    ``1002003 * base`` concatenates ``base``, ``2*base`` and ``3*base`` for
    3-digit bases. Scanning bases downward returns the maximum first.
    """

    def _is_9_pandigital(candidate ):
        # Local helper: the module-level predicate is not reachable under the
        # name this function originally used (it was renamed), which made the
        # original body raise NameError.
        digits = str(candidate )
        return len(digits ) == 9 and set(digits ) == set("123456789" )

    for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
        candidate = 1_0_0_0_0_2 * base_num
        if _is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_3_3 , 9_9 , -1 ):
        candidate = 1_0_0_2_0_0_3 * base_num
        if _is_9_pandigital(candidate ):
            return candidate
    return None


# Name used by the __main__ demo below.
solution = UpperCAmelCase_
if __name__ == "__main__":
    # NOTE(review): ``solution`` was not defined in this module as written
    # (the solver def was renamed to ``UpperCAmelCase_``); call the name that
    # actually exists at module level.
    print(F"""{UpperCAmelCase_() = }""")
| 0
|
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ ):
    """Return True if ``lowercase__`` is "bouncy": its digits are neither
    entirely non-decreasing nor entirely non-increasing.

    Raises:
        ValueError: if the argument is not an integer.
    """
    # Bug fix: the original tested ``isinstance(n, n)`` — the value was also
    # used as the type argument — which raises TypeError on every call.
    if not isinstance(lowercase__ , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(lowercase__ )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


# The solver below refers to this predicate by its conventional name.
check_bouncy = UpperCAmelCase_
def UpperCAmelCase_ ( lowercase__ = 9_9 ):
    """Return the least number for which the proportion of bouncy numbers up
    to and including it first reaches ``lowercase__`` percent (Project Euler
    112).

    Raises:
        ValueError: if the percentage is outside (0, 100).
    """
    if not 0 < lowercase__ < 1_0_0:
        raise ValueError("solution() only accepts values from 0 to 100" )

    def _is_bouncy(number ):
        # Local helper: the module-level checker is not reachable under the
        # name this function originally used (it was renamed), which made the
        # original body raise NameError.
        digits = str(number )
        ascending = "".join(sorted(digits ) )
        return ascending != digits and ascending[::-1] != digits

    bouncy_num = 0
    num = 1
    while True:
        if _is_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= lowercase__:
            return num
        num += 1


# Name used by the __main__ demo below.
solution = UpperCAmelCase_
num += 1
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): ``solution`` was not defined in this module as written
    # (the solver def was renamed to ``UpperCAmelCase_``); call the name that
    # actually exists at module level.
    print(F"""{UpperCAmelCase_(99)}""")
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
    """Shared test mixin for diffusers UNet blocks: builds random dummy
    inputs, compares an output slice against an expected tensor, and runs a
    single training step.

    NOTE(review): every method below is named ``lowercase_`` (later
    definitions shadow earlier ones), one signature repeats the parameter
    name ``lowerCAmelCase_`` (a SyntaxError), and assignment targets were
    collapsed to ``a_``. The internal call sites (``self.get_dummy_input``,
    ``self.dummy_input``, ``self.prepare_init_args_and_inputs_for_common``,
    ``self.block_class``, ``self.block_type``, ``self.output_shape``) show
    the intended names, which must be restored. ``block_type`` /
    ``block_class`` are expected to be supplied by concrete subclasses.
    """

    @property
    def lowercase_ ( self) -> Any:
        """Dummy forward inputs (delegates to ``get_dummy_input``)."""
        return self.get_dummy_input()

    @property
    def lowercase_ ( self) -> List[str]:
        """Expected output shape, keyed by ``self.block_type``
        ('down' / 'mid' / 'up')."""
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
        """Build the dict of random tensors a block's forward expects
        (hidden states, optionally temb / residuals / encoder states / skip).

        NOTE(review): duplicate parameter names — SyntaxError as written;
        the flags appear to be ``include_temb``,
        ``include_res_hidden_states_tuple``,
        ``include_encoder_hidden_states`` and ``include_skip_sample``.
        """
        a_ =4
        a_ =3_2
        a_ =(3_2, 3_2)
        a_ =torch.manual_seed(0)
        a_ =torch.device(lowerCAmelCase_)
        a_ =(batch_size, num_channels) + sizes
        a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        a_ ={"hidden_states": hidden_states}
        if include_temb:
            a_ =1_2_8
            a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        if include_res_hidden_states_tuple:
            # Separate generator so the residual tuple differs from hidden_states.
            a_ =torch.manual_seed(1)
            a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
        if include_encoder_hidden_states:
            a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
        if include_skip_sample:
            a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
        return dummy_input

    def lowercase_ ( self) -> Optional[int]:
        """Return (init_dict, inputs_dict) for constructing and calling the
        block; 'mid' blocks take no out_channels, 'up' blocks add a skip."""
        a_ ={
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        if self.block_type == "up":
            a_ =3_2
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        a_ =self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self , lowerCAmelCase_) -> Dict:
        """Forward the block in eval mode and compare the last 3x3 output
        slice against the expected tensor (atol 5e-3)."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        unet_block.to(lowerCAmelCase_)
        unet_block.eval()
        with torch.no_grad():
            a_ =unet_block(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ =output[0]
        self.assertEqual(output.shape , self.output_shape)
        a_ =output[0, -1, -3:, -3:]
        a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
        assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def lowercase_ ( self) -> Optional[Any]:
        """Smoke-test a single training step: forward, MSE loss against a
        random target, backward."""
        a_ , a_ =self.prepare_init_args_and_inputs_for_common()
        a_ =self.block_class(**lowerCAmelCase_)
        model.to(lowerCAmelCase_)
        model.train()
        a_ =model(**lowerCAmelCase_)
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
            a_ =output[0]
        a_ =torch.device(lowerCAmelCase_)
        a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
        a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
        loss.backward()
| 41
| 0
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( __a , unittest.TestCase):
    """Fast ONNX Stable-Diffusion in-paint pipeline checks: everything comes
    from the shared ``OnnxPipelineTesterMixin`` base (``__a``); no extra
    cases are added here."""
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
    """Nightly GPU integration tests for ``OnnxStableDiffusionInpaintPipeline``
    against pinned output slices.

    NOTE(review): both properties and both test methods are named
    ``lowercase_`` — each later definition shadows the earlier one, and the
    bodies refer to ``self.gpu_provider`` / ``self.gpu_options`` which are
    never bound under those names. The original method names must be
    restored for these tests to run.
    """

    @property
    def lowercase_ ( self) -> List[Any]:
        """onnxruntime CUDA execution-provider tuple with a 15GB arena cap."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def lowercase_ ( self) -> Any:
        """onnxruntime session options with memory-pattern optimization off."""
        a_ =ort.SessionOptions()
        a_ =False
        return options

    def lowercase_ ( self) -> List[str]:
        """In-paint with the pipeline's default scheduler and compare a 3x3
        output slice against a pinned reference (atol 1e-3)."""
        a_ =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        a_ =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        a_ =OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_)
        a_ ="A red cat sitting on a park bench"
        a_ =np.random.RandomState(0)
        a_ =pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCAmelCase_ , output_type="np" , )
        a_ =output.images
        a_ =images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
        assert images.shape == (1, 5_1_2, 5_1_2, 3)
        a_ =np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def lowercase_ ( self) -> Optional[Any]:
        """Same in-paint scenario, but with an LMSDiscreteScheduler and a
        different pinned reference slice."""
        a_ =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        a_ =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        a_ =LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx")
        a_ =OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_)
        a_ ="A red cat sitting on a park bench"
        a_ =np.random.RandomState(0)
        a_ =pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowerCAmelCase_ , output_type="np" , )
        a_ =output.images
        a_ =images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
        assert images.shape == (1, 5_1_2, 5_1_2, 3)
        a_ =np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 701
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( distance , src ):
    """Pretty-print shortest distances from vertex ``src``, one vertex per
    line.

    NOTE(review): the original signature reused one name for both parameters
    (a SyntaxError); names restored from the call site
    ``print_distance(shortest_distance, 0)``.
    """
    print(F"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(F"""{i}\t\t{d}""" )


# Name used by the interactive __main__ driver below.
print_distance = UpperCAmelCase_
def UpperCAmelCase_ ( graph , distance , edge_count ):
    """Return True if any edge can still be relaxed after Bellman-Ford has
    converged — i.e. the graph contains a negative-weight cycle.

    ``graph`` is a list of ``{"src", "dst", "weight"}`` dicts.

    NOTE(review): the original signature reused one name for all three
    parameters (a SyntaxError); names restored from the call site in
    ``bellman_ford``.
    """
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            return True
    return False


# Name used by ``bellman_ford`` below.
check_negative_cycle = UpperCAmelCase_
def UpperCAmelCase_ ( graph , vertex_count , edge_count , src ):
    """Bellman-Ford single-source shortest paths.

    Args:
        graph: list of ``{"src", "dst", "weight"}`` edge dicts.
        vertex_count: number of vertices.
        edge_count: number of edges to consider from ``graph``.
        src: source vertex index.

    Returns the list of shortest distances from ``src``.

    Raises:
        Exception: if a negative-weight cycle is reachable.

    NOTE(review): the original signature reused one name for all four
    parameters (a SyntaxError); names restored from the __main__ call site
    ``bellman_ford(graph, V, E, source)``.
    """

    def _has_negative_cycle(dist ):
        # One extra relaxation pass: any improvement implies a negative cycle.
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if dist[u] != float("inf" ) and dist[u] + w < dist[v]:
                return True
        return False

    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    # Relax every edge V-1 times.
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    if _has_negative_cycle(distance ):
        raise Exception("Negative cycle found" )
    return distance


# Name used by the interactive __main__ driver below.
bellman_ford = UpperCAmelCase_
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver: read an edge list and a source vertex, run
    # Bellman-Ford and print the resulting distance table.
    # NOTE(review): ``bellman_ford`` and ``print_distance`` are not defined
    # under those names in this module as written (the defs were renamed to
    # ``UpperCAmelCase_``) — confirm the intended bindings before running.
    lowercase = int(input('''Enter number of vertices: ''').strip())
    lowercase = int(input('''Enter number of edges: ''').strip())
    lowercase = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        lowercase , lowercase , lowercase = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
    lowercase = int(input('''\nEnter shortest path source:''').strip())
    lowercase = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Vocabulary / emoji resource names and Hub locations for the
# GPT-NeoX-Japanese tokenizer.
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}
# Maximum model input length for the pretrained checkpoint.
lowercase = {
    '''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
# NOTE(review): all four constants are bound to the same name ``lowercase``;
# the class below reads them as VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP
# / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never bound — the
# original distinct names must be restored.
def UpperCAmelCase_ ( vocab_file , emoji_file ):
    """Load the GPT-NeoX-Japanese vocabulary and emoji table.

    ``vocab_file`` holds one entry per line; a line may group several surface
    forms separated by commas (a line that is just "," is kept literally).

    Returns ``(vocab, raw_vocab, ids_to_tokens, emoji)`` where ``vocab`` maps
    every surface form to its id, ``raw_vocab`` maps the comma-joined line to
    its id, and ``ids_to_tokens`` maps the id back to its list of surface
    forms.

    NOTE(review): the original signature reused one name for both parameters
    (a SyntaxError) and collapsed the loop-body assignments to ``a_``; the
    restored body follows the data structures consumed by the tokenizer
    classes below.
    """
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji


# Name used by the tokenizer class below.
load_vocab_and_emoji = UpperCAmelCase_
class UpperCAmelCase ( __a):
    """GPT-NeoX-Japanese tokenizer wrapper: delegates actual tokenization to
    ``SubWordJapaneseTokenizer`` built from ``vocab.txt`` + ``emoji.json``.

    NOTE(review): ``__init__`` repeats the parameter name ``lowerCAmelCase_``
    (a SyntaxError), all methods are named ``lowercase_`` (later definitions
    shadow earlier ones), and assignment targets were collapsed to ``a_``.
    The intended attribute/method names are visible from the reads below
    (``self.raw_vocab``, ``self.subword_tokenizer``, ``self.ids_to_tokens``,
    ``index`` in ``save_vocabulary``) and must be restored.
    """

    __magic_name__ : Optional[int] = VOCAB_FILES_NAMES
    __magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : str = ["input_ids", "attention_mask"]

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
        """Validate the vocab/emoji files exist, load them, and build the
        underlying subword tokenizer."""
        super().__init__(
            unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        a_ =do_clean_text
        a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
        a_ =SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)

    @property
    def lowercase_ ( self) -> int:
        """Vocabulary size (number of distinct grouped entries)."""
        return len(self.raw_vocab)

    def lowercase_ ( self) -> Optional[Any]:
        """Vocabulary dict including any added tokens."""
        return dict(self.raw_vocab , **self.added_tokens_encoder)

    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Tokenize a string via the subword tokenizer (optionally cleaned)."""
        return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)

    def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
        """Token string -> id (falls back to the unk token's id)."""
        return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))

    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Id -> token string via the subword tokenizer."""
        return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        """Join a token sequence back into a single string."""
        a_ ="".join(lowerCAmelCase_).strip()
        return out_string

    def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
        """Encode a Conversation: each turn followed by EOS, truncated from
        the left to ``model_max_length``."""
        a_ =[]
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
        if len(lowerCAmelCase_) > self.model_max_length:
            a_ =input_ids[-self.model_max_length :]
        return input_ids

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
        """Write the vocabulary (comma-joined groups, one per line) and the
        emoji JSON to ``save_directory``; returns both file paths."""
        a_ =0
        if os.path.isdir(lowerCAmelCase_):
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    a_ =token_index
                writer.write(",".join(lowerCAmelCase_) + "\n")
                index += 1
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            json.dump(self.emoji , lowerCAmelCase_)
        return vocab_file, emoji_file
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =vocab # same as swe
a_ =ids_to_tokens # same as bpe
a_ =emoji
a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ =re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
a_ =content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
        """Tokenize text into vocabulary entries by greedy longest-match.

        Whitespace / line breaks / tabs are first normalised into the special
        tokens ``<SP>``, ``<BR>`` and ``<TAB>``, emoji are mapped via the emoji
        table, and (optionally) the text is cleaned.  Characters that are not
        in the vocabulary fall back to ``<KIGOU>`` (symbol ranges),
        ``<U2000U2BFF>`` (U+2000..U+2BFF) or raw byte tokens ``<|byteN|>``.
        """
        # Normalise ASCII and ideographic spaces, all newline flavours and tabs.
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace("\r\n" , "<BR>")
        a_ =text.replace("\n" , "<BR>")
        a_ =text.replace("\r" , "<BR>")
        a_ =text.replace("\t" , "<TAB>")
        # Unify dash variants to the katakana long-vowel mark.
        a_ =text.replace("—" , "ー")
        a_ =text.replace("−" , "ー")
        # Substitute raw emoji characters with their placeholder tokens.
        for k, v in self.emoji["emoji"].items():
            if k in text:
                a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
        if clean:
            a_ =self.clean_text(lowerCAmelCase_)
        def check_simbol(lowerCAmelCase_):
            # True for a single character whose UTF-8 encoding is 2 bytes and
            # falls in selected symbol ranges (punctuation-like glyphs).
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
                a_ =(int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False
        def checkuae(lowerCAmelCase_):
            # True for a single character encoding to 3 UTF-8 bytes in the
            # U+2000..U+2BFF range (arrows, technical symbols, ...).
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
                a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False
        a_ =0
        a_ =[]
        # Greedy longest-match scan: try the longest candidate substring first;
        # among vocabulary hits the smallest token id wins.
        while pos < len(lowerCAmelCase_):
            # Window: up to maxlen for special "<...>" tokens, else 3 chars.
            a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            a_ =[]  # (token_id, token, pos)
            for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
                a_ =text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(lowerCAmelCase_) > 2:
                        # Special token: accept immediately.
                        a_ =[(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(lowerCAmelCase_) > 0:
                # the smallest token_id is adopted
                a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
                result.append(lowerCAmelCase_)
                a_ =e
            else:
                # No vocabulary match: emit a fallback token for one character.
                a_ =pos + 1
                a_ =text[pos:end]
                if check_simbol(lowerCAmelCase_):
                    result.append("<KIGOU>")
                elif checkuae(lowerCAmelCase_):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                a_ =end
        return result
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
        """Convert an id back to text, decoding byte tokens and expanding the
        special placeholders (``<SP>``, ``<BR>``, ``<TAB>``, emoji, ...).

        The second argument is the string substituted for ``<BR>``
        (defaults to a newline).  Consecutive ``<|byteN|>`` tokens are
        accumulated and decoded as UTF-8 with replacement on errors.
        """
        a_ =[]
        a_ =[]
        a_ =self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            # Raw byte token: buffer the byte value for later UTF-8 decoding.
            byte_tokens.append(int(word[6:-2]))
        else:
            # Flush any pending byte buffer before emitting a normal token.
            if len(lowerCAmelCase_) > 0:
                words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
                a_ =[]
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                # Emoji placeholder -> original emoji character.
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(lowerCAmelCase_)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(lowerCAmelCase_)
        # Flush a trailing byte buffer, if any.
        if len(lowerCAmelCase_) > 0:
            words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
        a_ ="".join(lowerCAmelCase_)
        return text
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline

# Path to a locally fine-tuned Stable Diffusion checkpoint (e.g. DreamBooth output).
lowercase = '''path-to-your-trained-model'''
# Load the pipeline in half precision and move it to the GPU.
# NOTE(review): the checkpoint path is bound to `lowercase` but read as
# `model_id` below — variable names look machine-mangled; confirm upstream.
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
# Generate one image from the prompt and save it to disk.
lowercase = '''A photo of sks dog in a bucket'''
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# When PIL is unavailable, install a no-op stand-in so the module still imports
# (the vision-dependent tests are skipped by the decorators anyway).
if is_vision_available():
    from PIL import Image
else:
    class UpperCAmelCase :
        '''No-op placeholder used in place of PIL.Image when vision extras are absent.'''
        @staticmethod
        def lowercase_ ( *lowerCAmelCase_ , **lowerCAmelCase_) -> Optional[int]:
            """Accept any arguments and do nothing (stand-in for Image.open)."""
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase ( unittest.TestCase):
    """Pipeline tests for the object-detection task (tiny and full DETR models,
    plus a LayoutLM document model via pytesseract).

    NOTE(review): local names in this file look machine-mangled — results are
    bound to `a_` but later read through other names such as `lowerCAmelCase_`;
    the code is kept byte-identical and only documented here.
    """
    # Model mapping used by the common pipeline test harness.
    __magic_name__ : int = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Build an ObjectDetectionPipeline and return it with one sample image."""
        a_ =ObjectDetectionPipeline(model=lowerCAmelCase_ , image_processor=lowerCAmelCase_)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
        """Run the pipeline on single and batched inputs (various image modes)
        and check every detection has the expected score/label/box structure."""
        a_ =object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0)
        self.assertGreater(len(lowerCAmelCase_) , 0)
        for detected_object in outputs:
            self.assertEqual(
                lowerCAmelCase_ , {
                    "score": ANY(lowerCAmelCase_),
                    "label": ANY(lowerCAmelCase_),
                    "box": {"xmin": ANY(lowerCAmelCase_), "ymin": ANY(lowerCAmelCase_), "xmax": ANY(lowerCAmelCase_), "ymax": ANY(lowerCAmelCase_)},
                } , )
        import datasets
        # Mixed batch: local PIL image, URL, and RGBA / LA / L mode fixtures.
        a_ =datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test")
        a_ =[
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        a_ =object_detector(lowerCAmelCase_ , threshold=0.0)
        self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
        for outputs in batch_outputs:
            self.assertGreater(len(lowerCAmelCase_) , 0)
            for detected_object in outputs:
                self.assertEqual(
                    lowerCAmelCase_ , {
                        "score": ANY(lowerCAmelCase_),
                        "label": ANY(lowerCAmelCase_),
                        "box": {"xmin": ANY(lowerCAmelCase_), "ymin": ANY(lowerCAmelCase_), "xmax": ANY(lowerCAmelCase_), "ymax": ANY(lowerCAmelCase_)},
                    } , )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def lowercase_ ( self) -> List[Any]:
        """TensorFlow backend: intentionally skipped (not implemented)."""
        pass
    @require_torch
    def lowercase_ ( self) -> List[str]:
        """Fast test with a tiny random DETR checkpoint: pin exact outputs at
        threshold 0.0 for single and batched inputs."""
        a_ ="hf-internal-testing/tiny-detr-mobilenetsv3"
        a_ =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase_)
        a_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_)
        a_ =ObjectDetectionPipeline(model=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_)
        a_ =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0)
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
                {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
            ] , )
        a_ =object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] , threshold=0.0 , )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                [
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
                ],
                [
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
                ],
            ] , )
    @require_torch
    @slow
    def lowercase_ ( self) -> Union[str, Any]:
        """Slow test with facebook/detr-resnet-50 built manually from model and
        feature extractor: pin real detections for single and batched inputs."""
        a_ ="facebook/detr-resnet-50"
        a_ =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase_)
        a_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase_)
        a_ =ObjectDetectionPipeline(model=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_)
        a_ =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
                {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
                {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
                {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
                {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
            ] , )
        a_ =object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ])
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
                ],
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
                ],
            ] , )
    @require_torch
    @slow
    def lowercase_ ( self) -> Any:
        """Same as above but constructing the pipeline via pipeline() factory."""
        a_ ="facebook/detr-resnet-50"
        a_ =pipeline("object-detection" , model=lowerCAmelCase_)
        a_ =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
                {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
                {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
                {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
                {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
            ] , )
        a_ =object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ])
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
                ],
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
                ],
            ] , )
    @require_torch
    @slow
    def lowercase_ ( self) -> Union[str, Any]:
        """Check the `threshold` argument filters out low-confidence detections."""
        a_ =0.9_9_8_5
        a_ ="facebook/detr-resnet-50"
        a_ =pipeline("object-detection" , model=lowerCAmelCase_)
        a_ =object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowerCAmelCase_)
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
                {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
            ] , )
    @require_torch
    @require_pytesseract
    @slow
    def lowercase_ ( self) -> Union[str, Any]:
        """Document-layout model (LayoutLMv3) exercised through the same
        object-detection pipeline, with OCR via pytesseract."""
        a_ ="Narsil/layoutlmv3-finetuned-funsd"
        a_ =0.9_9_9_3
        a_ =pipeline("object-detection" , model=lowerCAmelCase_ , threshold=lowerCAmelCase_)
        a_ =object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png")
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4) , [
                {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}},
                {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}},
            ] , )
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure for the vision-encoder-decoder package: configuration
# is always importable; the PyTorch / TensorFlow / Flax model classes are only
# registered when the corresponding backend is installed.
# NOTE(review): all module-level names are bound to `lowercase` (obfuscated);
# upstream this dict is `_import_structure` — confirm before editing.
lowercase = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

# PyTorch models, only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = ['''VisionEncoderDecoderModel''']

# TensorFlow models, only when TF is available.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = ['''TFVisionEncoderDecoderModel''']

# Flax models, only when Flax is available.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    # At runtime, replace the module with a lazy loader.
    import sys

    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowercase = pytest.mark.integration
lowercase = {'''comet'''}
lowercase = importlib.util.find_spec('''fairseq''') is not None
lowercase = {'''code_eval'''}
lowercase = os.name == '''nt'''
lowercase = {'''bertscore''', '''frugalscore''', '''perplexity'''}
lowercase = importlib.util.find_spec('''transformers''') is not None
def UpperCAmelCase_ ( test_case ):
    """Decorator: skip a metric test when it requires fairseq and fairseq is absent.

    Fix: the wrapped body referenced ``test_case`` and ``metric_name`` that were
    never bound (the obfuscated parameters were both named ``lowercase__``),
    which raised NameError at call time. Parameters restored.
    """
    @wraps(test_case )
    def wrapper(self , metric_name ):
        # Skip only when fairseq is missing AND this metric needs it.
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("\"test requires Fairseq\"" )
        else:
            test_case(self , metric_name )
    return wrapper
def UpperCAmelCase_ ( test_case ):
    """Decorator: skip a metric test when it requires transformers and it is absent.

    Fix: restored the ``test_case`` / ``metric_name`` parameter names that the
    obfuscation collapsed to ``lowercase__`` (the body read undefined names).
    """
    @wraps(test_case )
    def wrapper(self , metric_name ):
        # Skip only when transformers is missing AND this metric needs it.
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("\"test requires transformers\"" )
        else:
            test_case(self , metric_name )
    return wrapper
def UpperCAmelCase_ ( test_case ):
    """Decorator: skip a metric test on Windows for unsupported metrics.

    Fix: restored the ``test_case`` / ``metric_name`` parameter names that the
    obfuscation collapsed to ``lowercase__`` (the body read undefined names).
    """
    @wraps(test_case )
    def wrapper(self , metric_name ):
        # Skip only on Windows AND when the metric is known not to work there.
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("\"test not supported on Windows\"" )
        else:
            test_case(self , metric_name )
    return wrapper
def UpperCAmelCase_ ( ):
    """Discover metric names under ./metrics/ for parameterized test generation.

    Returns one ``{"testcase_name": name, "metric_name": name}`` dict per metric
    directory, excluding ``gleu`` (unfinished).

    Fix: the directory list was bound to the obfuscated name ``a_`` while the
    return expression read an undefined ``metrics`` — NameError at call time.
    """
    metrics =[metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    __a , __a , __a)
@local
class UpperCAmelCase ( parameterized.TestCase):
    """Run each local metric's doctests, with heavyweight model calls patched out.

    One parameterized test case is generated per directory under ./metrics/.
    """
    # metric_name -> context-manager factory that mocks expensive calls.
    __magic_name__ : Any = {}
    # Placeholder for a default patcher (none by default).
    __magic_name__ : Optional[int] = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Import the metric module, sanity-check `_compute`'s signature
        (no **kwargs), then run its doctests with intensive calls patched."""
        a_ ="[...]"
        a_ =importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase_)).module_path)
        a_ =datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase_)
        # check parameters
        a_ =inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(lowerCAmelCase_ , metric_module.__name__):
            with self.use_local_metrics():
                try:
                    a_ =doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)
    @slow
    def lowercase_ ( self , lowerCAmelCase_) -> Tuple:
        """Slow variant: run the doctests for real, without patching."""
        a_ ="[...]"
        a_ =importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase_)).module_path)
        # run doctest
        with self.use_local_metrics():
            a_ =doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_)
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)
    @contextmanager
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
        """Enter the registered patcher for this metric, if any; else no-op."""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase_):
                yield
        else:
            yield
    @contextmanager
    def lowercase_ ( self) -> Optional[Any]:
        """Redirect datasets.load_metric to the local ./metrics/ copies."""
        def load_local_metric(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_):
            return load_metric(os.path.join("metrics" , lowerCAmelCase_) , *lowerCAmelCase_ , **lowerCAmelCase_)
        with patch("datasets.load_metric") as mock_load_metric:
            a_ =load_local_metric
            yield
    @classmethod
    def lowercase_ ( cls , lowerCAmelCase_) -> List[Any]:
        """Class decorator factory: register a context-manager patcher for a
        metric name in INTENSIVE_CALLS_PATCHER."""
        def wrapper(lowerCAmelCase_):
            a_ =contextmanager(lowerCAmelCase_)
            a_ =patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def UpperCAmelCase_ ( lowercase__ ):
    '''Patch bleurt so its doctest runs without downloading a model: the
    predictor is replaced by a stub returning fixed scores.'''
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv" , "" , "" )  # handle pytest cli flags

    class UpperCAmelCase ( __a):
        '''Stub predictor returning constant BLEURT scores for two inputs.'''
        def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
            """Return fixed scores; asserts the doctest passes two examples."""
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.0_3, 1.0_4])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
        a_ =MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def UpperCAmelCase_ ( lowercase__ ):
    '''Patch bertscore so its doctest runs without downloading BERT: the
    scoring function returns constant similarity tensors.'''
    import torch

    def bert_cos_score_idf(lowercase__ , lowercase__ , *lowercase__ , **lowercase__ ):
        # One constant (P, R, F) row per reference.
        return torch.tensor([[1.0, 1.0, 1.0]] * len(lowercase__ ) )

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model" ), patch(
        "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        a_ =bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def UpperCAmelCase_ ( lowercase__ ):
    '''Patch comet so its doctest runs without downloading a model: both the
    download and checkpoint loading are replaced by a stub model.'''
    def load_from_checkpoint(lowercase__ ):
        class UpperCAmelCase :
            '''Stub COMET model returning fixed per-segment scores and their mean.'''
            def lowercase_ ( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_) -> Dict:
                """Return fixed scores; asserts the doctest passes two segments."""
                assert len(lowerCAmelCase_) == 2
                a_ =[0.1_9, 0.9_2]
                return scores, sum(lowerCAmelCase_) / len(lowerCAmelCase_)
        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model" ) as mock_download_model:
        a_ =None
        with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
            a_ =load_from_checkpoint
            yield
def UpperCAmelCase_ ( ):
    """Regression test: seqeval must reject an unknown tagging scheme.

    Fix: the original body read three undefined names (``wrong_scheme``,
    ``metric`` and ``lowercase__``) because the obfuscation collapsed the local
    assignments — it raised NameError instead of testing anything. Restored
    per upstream: the metric raises ValueError mentioning the bad scheme.
    """
    # Load the local copy of the seqeval metric.
    metric =load_metric(os.path.join("metrics" , "seqeval" ) )
    wrong_scheme ="ERROR"
    expected_message =F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(expected_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
# Names of the two vocabulary files this tokenizer ships with.
# NOTE(review): all module constants are bound to `lowercase` (obfuscated);
# upstream these are VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — confirm upstream.
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}
# Maximum positional embedding size per pretrained checkpoint.
lowercase = {
    '''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =json.loads(f.read() )
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =f.readlines()
a_ =[[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ =b
a_ =idx
for wd in b:
a_ =idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
    '''GPT-NeoX-Japanese tokenizer: delegates the actual text segmentation to a
    SubWordJapaneseTokenizer built from vocab.txt + emoji.json.

    NOTE(review): local names in this file look machine-mangled (results bound
    to `a_` but read under other names); code kept byte-identical.
    '''
    # File names, download maps and max positional sizes (module constants).
    __magic_name__ : Optional[int] = VOCAB_FILES_NAMES
    __magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : str = ["input_ids", "attention_mask"]
    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
        """Validate the vocab/emoji file paths, load them, and build the
        underlying subword tokenizer."""
        super().__init__(
            unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        a_ =do_clean_text
        a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
        a_ =SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
    @property
    def lowercase_ ( self) -> int:
        """Vocabulary size (number of distinct line entries, not surface forms)."""
        return len(self.raw_vocab)
    def lowercase_ ( self) -> Optional[Any]:
        """Full vocabulary including added tokens."""
        return dict(self.raw_vocab , **self.added_tokens_encoder)
    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Tokenize text via the subword tokenizer (optionally cleaning first)."""
        return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
    def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
        """Token string -> id, falling back to the unk token's id."""
        return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Id -> token string, delegated to the subword tokenizer."""
        return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        """Join a token sequence back into a single stripped string."""
        a_ ="".join(lowerCAmelCase_).strip()
        return out_string
    def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
        """Encode a Conversation: concatenate all turns, each followed by EOS,
        truncating from the left to model_max_length."""
        a_ =[]
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
        if len(lowerCAmelCase_) > self.model_max_length:
            a_ =input_ids[-self.model_max_length :]
        return input_ids
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
        """Write vocab.txt and emoji.json into `save_directory`, warning if the
        id sequence in ids_to_tokens is not consecutive."""
        a_ =0
        if os.path.isdir(lowerCAmelCase_):
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    a_ =token_index
                writer.write(",".join(lowerCAmelCase_) + "\n")
                index += 1
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            json.dump(self.emoji , lowerCAmelCase_)
        return vocab_file, emoji_file
class UpperCAmelCase ( __a):
    '''Subword tokenizer for Japanese GPT-NeoX: greedy longest-match over the
    vocabulary with placeholder tokens for whitespace, emoji, symbols and
    raw bytes.

    NOTE(review): local/attribute names look machine-mangled — the six regex
    patterns below are all bound to the same attribute name via `a_ =`;
    upstream they are content_repatter1..6 — confirm before editing.
    '''
    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Store vocab / id-table / emoji map and precompile the cleaning
        regexes (URL, e-mail, phone, two date formats, price) plus the
        box-drawing -> <BLOCK> translation table."""
        a_ =vocab  # same as swe
        a_ =ids_to_tokens  # same as bpe
        a_ =emoji
        # Longest surface form in the vocabulary (greedy-match window size).
        a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
        a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        a_ =re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        a_ =re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        a_ =re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        # Box-drawing and block-element characters collapse to <BLOCK>.
        a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__( self) -> Tuple:
        """Number of ids in the id -> token table."""
        return len(self.ids_to_tokens)
    def lowercase_ ( self , lowerCAmelCase_) -> Any:
        """Replace URLs / e-mails / phones / dates / prices with placeholders
        and collapse box-drawing characters into a single <BLOCK> token."""
        a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
        a_ =content.translate(self.content_transa)
        # Collapse runs of consecutive <BLOCK> placeholders into one.
        while "<BLOCK><BLOCK>" in content:
            a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
        return content
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
        """Tokenize text by greedy longest-match with fallbacks: <KIGOU> for
        2-byte symbols, <U2000U2BFF> for U+2000..U+2BFF, else byte tokens."""
        # Normalise whitespace variants, line breaks, tabs and dashes.
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace("\r\n" , "<BR>")
        a_ =text.replace("\n" , "<BR>")
        a_ =text.replace("\r" , "<BR>")
        a_ =text.replace("\t" , "<TAB>")
        a_ =text.replace("—" , "ー")
        a_ =text.replace("−" , "ー")
        # Substitute raw emoji characters with their placeholder tokens.
        for k, v in self.emoji["emoji"].items():
            if k in text:
                a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
        if clean:
            a_ =self.clean_text(lowerCAmelCase_)
        def check_simbol(lowerCAmelCase_):
            # True for a single char whose UTF-8 form is 2 bytes in symbol ranges.
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
                a_ =(int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False
        def checkuae(lowerCAmelCase_):
            # True for a single char encoding to 3 bytes in U+2000..U+2BFF.
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
                a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False
        a_ =0
        a_ =[]
        # Greedy scan: longest candidate first; smallest token id wins ties.
        while pos < len(lowerCAmelCase_):
            a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            a_ =[]  # (token_id, token, pos)
            for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
                a_ =text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(lowerCAmelCase_) > 2:
                        a_ =[(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(lowerCAmelCase_) > 0:
                # the smallest token_id is adopted
                a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
                result.append(lowerCAmelCase_)
                a_ =e
            else:
                a_ =pos + 1
                a_ =text[pos:end]
                if check_simbol(lowerCAmelCase_):
                    result.append("<KIGOU>")
                elif checkuae(lowerCAmelCase_):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                a_ =end
        return result
    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
        """Convert an id back to text: decode buffered byte tokens as UTF-8 and
        expand placeholders; the second argument replaces <BR>."""
        a_ =[]
        a_ =[]
        a_ =self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            # Flush pending byte buffer before a normal token.
            if len(lowerCAmelCase_) > 0:
                words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
                a_ =[]
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(lowerCAmelCase_)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(lowerCAmelCase_)
        # Flush any trailing byte buffer.
        if len(lowerCAmelCase_) > 0:
            words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
        a_ ="".join(lowerCAmelCase_)
        return text
| 41
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for Swin Transformer V2: configuration is always
# importable; model classes only when PyTorch is available.
# NOTE(review): names are bound to `lowercase` (obfuscated); upstream this is
# `_import_structure` — confirm before editing.
lowercase = {
    '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = [
        '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Swinv2ForImageClassification''',
        '''Swinv2ForMaskedImageModeling''',
        '''Swinv2Model''',
        '''Swinv2PreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )

else:
    # At runtime, replace the module with a lazy loader.
    import sys

    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''spiece.model'''}
lowercase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowercase = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = VOCAB_FILES_NAMES
__magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : int = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> None:
"""simple docstring"""
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else bos_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else eos_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else unk_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else pad_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cls_token
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a_ =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else mask_token
a_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
a_ =vocab_file
a_ =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCAmelCase_)
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ ={self.convert_ids_to_tokens(lowerCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.__dict__.copy()
a_ =None
return state
def __setstate__( self , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
a_ ={}
a_ =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =self.sp_model.IdToPiece(lowerCAmelCase_)
return token
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ =[]
a_ =""
a_ =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_) + token
a_ =True
a_ =[]
else:
current_sub_tokens.append(lowerCAmelCase_)
a_ =False
out_string += self.sp_model.decode(lowerCAmelCase_)
return out_string.strip()
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = True , **lowerCAmelCase_ , ) -> str:
"""simple docstring"""
a_ =kwargs.pop("use_source_tokenizer" , lowerCAmelCase_)
a_ =self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a_ =[]
a_ =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_))
a_ =[]
sub_texts.append(lowerCAmelCase_)
else:
current_sub_text.append(lowerCAmelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_))
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
a_ =re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(lowerCAmelCase_))
else:
a_ ="".join(lowerCAmelCase_)
a_ =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a_ =self.clean_up_tokenization(lowerCAmelCase_)
return clean_text
else:
return text
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase_ , "wb") as fi:
a_ =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_)
return (out_vocab_file,)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ =[self.cls_token_id]
a_ =[self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_)) + [1]
return [1] + ([0] * len(lowerCAmelCase_)) + [1] + ([0] * len(lowerCAmelCase_)) + [1]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[int]:
"""simple docstring"""
a_ =[self.sep_token_id]
a_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_) -> None:
"""simple docstring"""
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_)
| 708
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ , a_ =0, 1
while True:
a_ , a_ =b, a + b
yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
'''simple docstring'''
a_ =1
a_ =fibonacci_generator()
while len(str(next(lowercase__ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 41
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.