"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Sync the backend pre-tokenizer's add_prefix_space flag with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
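

# Minimal usage sketch, assuming this module ships inside transformers as the BLOOM
# fast tokenizer. "bigscience/bloom-560m" is a real Hub checkpoint, but the call below
# is illustrative (it needs network access) rather than a tested example.
if __name__ == "__main__":
    tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    encoded = tokenizer("Hello, BLOOM!")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))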
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        tokens_order = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, tokens_order)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(tokens_order))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, tokens_order)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
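

# Note on the length assertions above: the pipeline reconstructs the waveform from a
# spectrogram with x_res frames, so the audio is (x_res - 1) * hop_length samples long.
# Assuming Mel's default hop_length of 512, a 64-frame spectrogram would give
# (64 - 1) * 512 = 32256 samples.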
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # The error messages below mirror the ones raised by the knapsack library.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
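

# Usage sketch: because of _LazyModule, the heavy submodules above load only on first
# attribute access. An illustrative zero-shot detection setup ("google/owlvit-base-patch32"
# is a real Hub checkpoint, but treat the snippet as a sketch rather than a tested example):
#
#     from transformers import OwlViTProcessor, OwlViTForObjectDetection
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")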
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    """Return True only if every supplied value is a positive number."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
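

# Worked example (hand-checked, using the argument order reconstructed above): for
# hydrogen (M ≈ 2.016 g/mol) against oxygen (M ≈ 32.00 g/mol), Graham's law gives
# rate_H2 / rate_O2 = sqrt(32.00 / 2.016) ≈ 3.98, i.e. hydrogen effuses roughly four
# times faster.
#
#     effusion_ratio(2.016, 32.00)  # ≈ 3.984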
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        # The backbone (last class) is excluded: it has no input/output embeddings.
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertTrue(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
"""simple docstring"""
def apply_table(inp, table):
    """Permute the bits of `inp` according to `table` (tables are 1-indexed)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circularly rotate the bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise xor of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
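

# Hand-checked micro-examples for the helpers above (illustrative only):
#
#     apply_table("1010", [2, 1, 4, 3])  ->  "0101"  (tables are 1-indexed)
#     left_shift("10100")                ->  "01001" (circular left rotation)
#     xor("1010", "0110")                ->  "1100"  (bitwise xor of bit-strings)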
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (apply the round keys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
"""simple docstring"""
import os
def solution():
    """Return the greatest product of four adjacent numbers in the same direction
    (right, down, or either diagonal) in the 20x20 grid read from grid.txt
    (20 rows of 20 space-separated integers, Project Euler problem 11)."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the reversible numbers of the given length by filling `digits` from both
    ends toward the middle while tracking the carry (`remainder`) of n + reverse(n).
    A number is reversible when every digit of n + reverse(n) is odd.
    """
    if remaining_length == 0:
        # Leading zeroes are not allowed in either n or reverse(n).
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    Count the reversible numbers below 10**max_power (Project Euler problem 145).
    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite requires a backward pass at inference time, and there is no
    # deterministic backward operator for padding, so determinism is toggled per class.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[Any] , __a: Dict=3 , __a: str=32 , __a: Dict=3 , __a: Dict=10 , __a: List[str]=[10, 20, 30, 40] , __a: Optional[Any]=[1, 1, 2, 1] , __a: Any=True , __a: Union[str, Any]=True , __a: Union[str, Any]="relu" , __a: Union[str, Any]=3 , __a: List[str]=None , )-> List[Any]:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Any = batch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : List[Any] = num_channels
lowerCamelCase : List[Any] = embeddings_size
lowerCamelCase : str = hidden_sizes
lowerCamelCase : int = depths
lowerCamelCase : Union[str, Any] = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[Any] = scope
lowerCamelCase : List[str] = len(__a )
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[str] )-> Tuple:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__ ( self: List[str] , __a: int , __a: Dict , __a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = TFResNetModel(config=__a )
lowerCamelCase : int = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: Optional[Any] , __a: Union[str, Any] , __a: str , __a: Optional[int] )-> List[Any]:
lowerCamelCase : Union[str, Any] = self.num_labels
lowerCamelCase : List[str] = TFResNetForImageClassification(__a )
lowerCamelCase : int = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: int )-> Union[str, Any]:
lowerCamelCase : List[str] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = config_and_inputs
lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[int] =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
snake_case__ : Tuple =(
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
snake_case__ : Optional[int] =False
snake_case__ : Optional[Any] =False
snake_case__ : Optional[int] =False
snake_case__ : List[Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : Tuple = TFResNetModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
def a__ ( self: Union[str, Any] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def a__ ( self: int )-> List[str]:
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def a__ ( self: Optional[Any] )-> Optional[Any]:
pass
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : str = model_class(__a )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Dict = [*signature.parameters.keys()]
lowerCamelCase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: int )-> Any:
def check_hidden_states_output(__a: List[Any] , __a: Dict , __a: Optional[Any] ):
lowerCamelCase : Optional[Any] = model_class(__a )
lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Tuple = layer_type
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: int )-> List[str]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: int )-> Dict:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = TFResNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
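# Hedged inference sketch mirroring the integration test above. The checkpoint
# name is an assumption (the first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
# is typically microsoft/resnet-50); the helper is illustrative and never called.
def _example_tf_resnet_inference():
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFResNetForImageClassification
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    processor = AutoImageProcessor.from_pretrained("""microsoft/resnet-50""" )
    model = TFResNetForImageClassification.from_pretrained("""microsoft/resnet-50""" )
    inputs = processor(images=image , return_tensors="""tf""" )
    logits = model(**inputs ).logits  # shape (1, 1_000), as asserted above
    return int(tf.math.argmax(logits , axis=-1 )[0] )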
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': EsmModel,
            '''fill-mask''': EsmForMaskedLM,
            '''text-classification''': EsmForSequenceClassification,
            '''token-classification''': EsmForTokenClassification,
            '''zero-shot''': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp( self ):
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_create_position_ids_respects_padding_index( self ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
    def test_create_position_ids_from_inputs_embeds( self ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus):
"""simple docstring"""
@slow
    def test_inference_masked_lm( self ):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_no_head( self ):
        with torch.no_grad():
            model = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
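# Hedged usage sketch of the public checkpoint the integration tests above load;
# the helper is illustrative and never called here.
def _example_esm_masked_lm():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
    model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ).eval()
    inputs = tokenizer("""MKTAYIAKQR""" , return_tensors="""pt""" )
    with torch.no_grad():
        logits = model(**inputs ).logits  # (1, sequence_length, vocab_size=33)
    return logits.shape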
"""simple docstring"""
import sys
def matrix_chain_order( array ):
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
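# Worked example of the recurrence above on the classic CLRS dimensions
# [30, 35, 15, 5, 10, 20, 25]: the minimal number of scalar multiplications,
# matrix[1][n - 1], is 15125 (an illustrative check, not part of the original module).
_example_matrix , _example_sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25] )
assert _example_matrix[1][6] == 15_125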
def print_optimal_solution( optimal_solution , i , j ):
    if i == j:
        print("""A""" + str(i ) , end=""" """ )
    else:
        print("""(""" , end=""" """ )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(""")""" , end=""" """ )
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
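# Hedged usage sketch for the public checkpoint exercised by the integration
# test above; per the full-tokenizer test, the tokenizer lower-cases its input.
def _example_albert_tokenizer():
    tokenizer = AlbertTokenizer.from_pretrained("""albert-base-v2""" )
    return tokenizer.tokenize("""This is a test""" )  # ['▁this', '▁is', '▁a', '▁test']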
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule):
    """simple docstring"""
    def __init__( self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def forward( self ):
        pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
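# Example invocation (the script name and paths are illustrative, not taken
# from this file):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa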
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort( graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components( reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components( graph: dict[int, list[int]] ) -> list[list[int]]:
    visited = len(graph ) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(order ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
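# Worked example on the first sample graph above: 0 -> 2 -> 1 -> 0 forms one
# strongly connected component, while 3 and 4 are singletons. (The restored
# function and graph names come from the call sites, so treat them as assumptions.)
assert sorted(sorted(component ) for component in strongly_connected_components(test_graph_1 ) ) == [[0, 1, 2], [3], [4]]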
"""simple docstring"""
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d( x: float , y: float , z: float , distance: float , scale: float ) -> tuple[float, float]:
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        error_msg = F'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(error_msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
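# Worked example of the perspective formula above: for (x, y, z) = (1.0, 2.0, 3.0)
# with distance = 10.0 and scale = 10.0, projected_x = (1 * 10) / (3 + 10) * 10
# ≈ 7.6923 and projected_y = (2 * 10) / (3 + 10) * 10 ≈ 15.3846.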
def rotate( x: float , y: float , z: float , axis: str , angle: float ) -> tuple[float, float, float]:
    if not isinstance(axis , str ):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        error_msg = (
            """Input values except axis must either be float or int: """
            F'{list(input_variables.values() )}'
        )
        raise TypeError(error_msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
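# Hedged instantiation sketch: in the public library this configuration class is
# exposed as TimeSeriesTransformerConfig (see the model_type above), e.g.
#
#   config = TimeSeriesTransformerConfig(
#       prediction_length=24,
#       context_length=48,
#       lags_sequence=[1, 2, 3],
#       num_time_features=2,
#   )
#
# The width of the input fed to the transformer is then derived as
# input_size * len(lags_sequence) + _number_of_features, per the property above.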
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :Optional[int] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[str] ='''perceiver'''
def __init__( self: Dict , __a: Any=256 , __a: int=1_280 , __a: int=768 , __a: Any=1 , __a: Optional[Any]=26 , __a: List[Any]=8 , __a: List[str]=8 , __a: Union[str, Any]=None , __a: str=None , __a: str="kv" , __a: List[str]=1 , __a: List[Any]=1 , __a: List[str]="gelu" , __a: Optional[int]=0.1 , __a: Optional[int]=0.02 , __a: int=1e-1_2 , __a: Any=True , __a: Any=262 , __a: Any=2_048 , __a: List[str]=56 , __a: Any=[368, 496] , __a: List[Any]=16 , __a: Any=1_920 , __a: int=16 , __a: List[Any]=[1, 16, 224, 224] , **__a: Union[str, Any] , )-> List[str]:
super().__init__(**__a )
lowerCamelCase : Union[str, Any] = num_latents
lowerCamelCase : Tuple = d_latents
lowerCamelCase : Optional[Any] = d_model
lowerCamelCase : Any = num_blocks
lowerCamelCase : List[str] = num_self_attends_per_block
lowerCamelCase : List[Any] = num_self_attention_heads
lowerCamelCase : int = num_cross_attention_heads
lowerCamelCase : List[str] = qk_channels
lowerCamelCase : List[Any] = v_channels
lowerCamelCase : Union[str, Any] = cross_attention_shape_for_attention
lowerCamelCase : Tuple = self_attention_widening_factor
lowerCamelCase : str = cross_attention_widening_factor
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Optional[Any] = layer_norm_eps
lowerCamelCase : Optional[Any] = use_query_residual
# masked language modeling attributes
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : Tuple = max_position_embeddings
# image classification attributes
lowerCamelCase : Union[str, Any] = image_size
# flow attributes
lowerCamelCase : List[Any] = train_size
# multimodal autoencoding attributes
lowerCamelCase : List[str] = num_frames
lowerCamelCase : Any = audio_samples_per_frame
lowerCamelCase : Tuple = samples_per_patch
lowerCamelCase : int = output_shape
class A__ ( __lowercase):
"""simple docstring"""
@property
def a__ ( self: int )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def a__ ( self: List[str] )-> float:
return 1e-4
def a__ ( self: Union[str, Any] , __a: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a: int = -1 , __a: int = -1 , __a: int = -1 , __a: bool = False , __a: Optional[TensorType] = None , __a: int = 3 , __a: int = 40 , __a: int = 40 , )-> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__a , __a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase : int = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase : List[str] = preprocessor.num_special_tokens_to_add(__a )
lowerCamelCase : Optional[Any] = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase : Optional[Any] = [""" """.join(["""a"""] ) * seq_length] * batch_size
lowerCamelCase : str = dict(preprocessor(__a , return_tensors=__a ) )
lowerCamelCase : List[str] = inputs.pop("""input_ids""" )
return inputs
elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase : Optional[Any] = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
lowerCamelCase : Dict = self._generate_dummy_images(__a , __a , __a , __a )
lowerCamelCase : str = dict(preprocessor(images=__a , return_tensors=__a ) )
lowerCamelCase : str = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def radix_sort( list_of_ints: list[int] ) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
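# Worked example: with RADIX = 10 the first pass buckets [170, 45, 75, 90, 802,
# 24, 2, 66] by the ones digit into [170, 90, 802, 2, 24, 45, 75, 66], and the
# later passes finish the sort (an illustrative check, not part of the original module).
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66] ) == [2, 24, 45, 66, 75, 90, 170, 802]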
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00_454, 264.172),
'cubicyard': from_to(0.76_455, 1.30_795),
'cubicfoot': from_to(0.028, 35.3_147),
'cup': from_to(0.000_236_588, 4_226.75),
}
def convert_volume( value: float , from_type: str , to_type: str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'
            + """, """.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + """, """.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
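# Worked example of the conversion chain above: every unit goes through cubic
# metres, so 1 gallon -> litre is 1 * 0.00454 * 1_000 = 4.54 litres (the table's
# gallon factor roughly matches the imperial gallon).
assert abs(convert_volume(1 , """gallon""" , """litre""" ) - 4.54 ) < 1e-9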
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , """rb""" ) as f:
        model_weights = pickle.load(f )["""weights"""]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
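# Example invocation (the script name and paths are illustrative, not taken
# from this file):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin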
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution):
    """simple docstring"""
    def __init__( self , base_distribution: Distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection( nn.Module):
    """simple docstring"""
    def __init__( self , in_features: int , args_dim: Dict[str, int] , domain_map: Callable[..., Tuple[torch.Tensor]] , **kwargs )-> None:
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x: torch.Tensor )-> Tuple[torch.Tensor]:
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module):
    """simple docstring"""
    def __init__( self , function ):
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        return self.function(x , *args )
class DistributionOutput :
    """simple docstring"""
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim: int = 1 )-> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None , )-> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self )-> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self )-> int:
        return len(self.event_shape )
    @property
    def value_in_support( self )-> float:
        return 0.0
    def get_parameter_projection( self , in_features: int )-> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args: torch.Tensor ):
        raise NotImplementedError()
    @staticmethod
    def squareplus( x: torch.Tensor )-> torch.Tensor:
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
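# The squareplus map above, (x + sqrt(x**2 + 4)) / 2, is a smooth positivity
# transform: squareplus(0) = 1, squareplus(-10) ≈ 0.099 and squareplus(10) ≈ 10.099,
# so unconstrained network outputs can parameterise strictly positive arguments
# such as scale, df or total_count in the heads below.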
class StudentTOutput( DistributionOutput):
    """simple docstring"""
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df: torch.Tensor , loc: torch.Tensor , scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput( DistributionOutput):
    """simple docstring"""
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc: torch.Tensor , scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput( DistributionOutput):
    """simple docstring"""
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count: torch.Tensor , logits: torch.Tensor ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args )-> Distribution:
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None )-> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
        lowerCamelCase : Optional[Any] = nn.BatchNorm1d(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class PreForwardHook( ModelHook):
    """simple docstring"""
    def pre_forward( self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook):
    """simple docstring"""
    def post_forward( self , module , output ):
        return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
    def a__ ( self: List[str] )-> Union[str, Any]:
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1 , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.linear1.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.linear2.weight.device , torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model , AlignDevicesHook(io_same_device=True ) )
        x = torch.randn(2 , 3 ).to(0 )
        output = model(x )
        self.assertEqual(output.device , torch.device(0 ) )
    def a__ ( self: List[str] )-> Tuple:
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        hook_kwargs = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["""execution_device"""] )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        hook_kwargs = {
            """execution_device""": 0 if torch.cuda.is_available() else """cpu""",
            """offload""": True,
            """offload_buffers""": True,
        }
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
    def a__ ( self: Any )-> List[str]:
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(model , execution_device=execution_device , offload=True )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(model , execution_device=execution_device , offload=True , offload_buffers=True )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
    def a__ ( self: Optional[Any] )-> List[Any]:
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() , offload_buffers=True , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device , torch.device("""cpu""" ) )
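# Sketch of the hook shapes the tests above rely on (an assumption based on
# accelerate's ModelHook interface; not part of the original test file):
# a PreForwardHook rewrites the inputs before forward, roughly
#     class PreForwardHook(ModelHook):
#         def pre_forward(self, module, *args, **kwargs):
#             return (args[0] + 1,) + args[1:], kwargs
# while a PostForwardHook rewrites the output, roughly `return output + 1`.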
| 42
| 1
|
"""simple docstring"""
def decimal_to_fraction( decimal: int | float | str ) -> tuple[int, int]:
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("""Please enter a valid number""" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split(""".""" )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # reduce by the greatest common divisor, found with the Euclidean algorithm
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
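# Worked example (added for clarity): "6.25" -> 625/100; the Euclidean loop
# above finds gcd(625, 100) = 25, so decimal_to_fraction("6.25") == (25, 4).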
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction("67") = }""")
print(f"""{decimal_to_fraction("45.0") = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction("6.25") = }""")
print(f"""{decimal_to_fraction("78td") = }""")
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
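# Note (added): _LazyModule defers the heavy torch import; names listed in
# _import_structure are only materialized on first attribute access, so e.g.
# touching EncodecModel triggers the actual `modeling_encodec` import then.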
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 1.6_021e-19 # units = C
def snake_case ( conductivity: float , electron_conc: float , mobility: float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
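# Illustrative call (added; numbers are assumptions): the function solves
# sigma = n * e * mu for whichever argument is passed as 0, e.g. with
# conductivity=25, electron_conc=100, mobility=0 it returns
# ("mobility", 25 / (100 * ELECTRON_CHARGE)), approximately 1.56e18.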
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
    def __init__( self: Optional[Any] , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , )-> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self: str )-> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self: Dict )-> Union[str, Any]:
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self: Optional[Any] , config , pixel_values , labels )-> List[Any]:
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self: int , config , pixel_values , labels )-> Optional[int]:
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self: List[Any] , config , pixel_values , labels )-> List[str]:
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self: Optional[Any] )-> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
    all_model_classes =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
    def setUp( self: Optional[Any] )-> Union[str, Any]:
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self: Optional[int] )-> Optional[Any]:
        return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
    def a__ ( self: Union[str, Any] )-> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def a__ ( self: Optional[int] )-> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def a__ ( self: str )-> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def a__ ( self: int )-> Optional[int]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def a__ ( self: Dict )-> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def a__ ( self: Optional[Any] )-> Tuple:
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Optional[int]:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor( self: Dict )-> Union[str, Any]:
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
    def a__ ( self: List[str] )-> Dict:
        model = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
    def setUp( self: List[str] )-> int:
        self.model_tester = ConvNextModelTester(self )
| 42
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : List[Any] =['''flax''']
def __init__( self: List[Any] , *__a: List[str] , **__a: int )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: List[str] , **__a: Union[str, Any] )-> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[str] , *__a: Optional[Any] , **__a: str )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : int =['''flax''']
def __init__( self: Union[str, Any] , *__a: Optional[int] , **__a: Optional[int] )-> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: str , *__a: Any , **__a: Tuple )-> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[Any] , *__a: int , **__a: List[Any] )-> List[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Optional[Any] =['''flax''']
def __init__( self: Tuple , *__a: List[Any] , **__a: List[str] )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: Optional[Any] , **__a: Optional[int] )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: str , *__a: List[str] , **__a: List[Any] )-> Tuple:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Tuple =['''flax''']
def __init__( self: str , *__a: Tuple , **__a: Union[str, Any] )-> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: List[str] , *__a: Any , **__a: Optional[Any] )-> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: Tuple , **__a: List[Any] )-> Any:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Union[str, Any] =['''flax''']
def __init__( self: str , *__a: Optional[int] , **__a: Tuple )-> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: Optional[Any] , **__a: int )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[Any] , *__a: int , **__a: Tuple )-> List[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : int =['''flax''']
def __init__( self: int , *__a: Tuple , **__a: Tuple )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Tuple , *__a: str , **__a: int )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[str] , *__a: Optional[int] , **__a: List[Any] )-> Tuple:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Optional[int] =['''flax''']
def __init__( self: Optional[Any] , *__a: Union[str, Any] , **__a: Dict )-> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Tuple , *__a: str , **__a: Any )-> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: List[str] , **__a: str )-> List[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Dict =['''flax''']
def __init__( self: Any , *__a: Any , **__a: Tuple )-> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: List[Any] , *__a: Union[str, Any] , **__a: Dict )-> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: Any , **__a: List[Any] )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Dict =['''flax''']
def __init__( self: Dict , *__a: Tuple , **__a: List[str] )-> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: List[str] , **__a: List[Any] )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Union[str, Any] , *__a: Optional[int] , **__a: str )-> Any:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Optional[Any] =['''flax''']
def __init__( self: Optional[int] , *__a: str , **__a: Union[str, Any] )-> List[str]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: Tuple , **__a: str )-> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: int , **__a: List[Any] )-> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : List[str] =['''flax''']
def __init__( self: int , *__a: Optional[int] , **__a: List[str] )-> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: List[Any] , **__a: List[Any] )-> Optional[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: Tuple , **__a: Tuple )-> int:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : Tuple =['''flax''']
def __init__( self: Optional[Any] , *__a: int , **__a: Union[str, Any] )-> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Tuple , *__a: List[Any] , **__a: Union[str, Any] )-> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: List[str] , **__a: Optional[int] )-> Tuple:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=DummyObject):
"""simple docstring"""
snake_case__ : str =['''flax''']
def __init__( self: Dict , *__a: Optional[int] , **__a: Dict )-> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Union[str, Any] , *__a: List[Any] , **__a: Union[str, Any] )-> Optional[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: Optional[int] , **__a: Tuple )-> List[str]:
requires_backends(cls , ["""flax"""] )
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
    def __init__( self: Union[str, Any] , vocab_size=30_522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13_353_718 , searcher_beam_size=5_000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , )-> Any:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal( u: float , p: int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main( ) -> None:
    n = int(input("""enter the numbers of values: """ ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print("""enter the values of parameters in a list: """ )
    x = list(map(int , input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
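# Illustrative run (added; inputs are assumptions): with x = [0, 1, 2, 3] and
# first-column y = [1, 2, 4, 8], the top row of forward differences is 1, 1, 1,
# and interpolating at a tabulated point such as value = 1 returns y = 2 exactly.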
if __name__ == "__main__":
main()
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
    def __init__( self: Dict , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , )-> Dict:
        super().__init__(**kwargs )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Any = logging.get_logger(__name__)
__lowerCamelCase :Dict = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Any ='''rwkv'''
snake_case__ : str ={'''max_position_embeddings''': '''context_length'''}
    def __init__( self: int , vocab_size=50_277 , context_length=1_024 , hidden_size=4_096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , )-> Union[str, Any]:
lowerCamelCase : int = vocab_size
lowerCamelCase : int = context_length
lowerCamelCase : List[str] = hidden_size
lowerCamelCase : Union[str, Any] = num_hidden_layers
lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCamelCase : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCamelCase : List[str] = layer_norm_epsilon
lowerCamelCase : Any = rescale_every
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Dict = bos_token_id
lowerCamelCase : str = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
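# Sketch of the defaulting logic above (added for illustration): with
# hidden_size=4_096 and both optional sizes left as None, attention_hidden_size
# resolves to 4_096 and intermediate_size to 4 * 4_096 == 16_384.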
| 42
|
"""simple docstring"""
from __future__ import annotations
import math
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float:
lowerCamelCase : Dict = u
for i in range(1 , UpperCamelCase__ ):
lowerCamelCase : List[str] = temp * (u - i)
return temp
def snake_case ( ) -> None:
lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) )
lowerCamelCase : list[list[float]] = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = 0
print("""enter the values of parameters in a list: """ )
lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(UpperCamelCase__ ):
lowerCamelCase : int = float(input() )
lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) )
lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1]
lowerCamelCase : Any = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 42
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =PLBartTokenizer
snake_case__ : Any =None
snake_case__ : Dict =False
def a__ ( self: int )-> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : List[Any] = PLBartTokenizer(__a , language_codes="""base""" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: int )-> str:
lowerCamelCase : Optional[Any] = PLBartTokenizer(__a , language_codes="""base""" , keep_accents=__a )
lowerCamelCase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCamelCase : List[Any] = tokenizer.vocab_size
lowerCamelCase : str = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 4 , __a )]
self.assertListEqual(__a , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
lowerCamelCase : Optional[Any] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCamelCase : Optional[int] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = PLBartTokenizer(__a , language_codes="""multi""" , keep_accents=__a )
lowerCamelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase : str = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCamelCase : Tuple = tokenizer.vocab_size
lowerCamelCase : int = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 7 , __a )]
self.assertListEqual(
__a , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
lowerCamelCase : Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCamelCase : List[Any] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
"""simple docstring"""
    checkpoint_name ='''uclanlp/plbart-python-en_XX'''
    src_text =[
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
    tgt_text =[
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
    expected_src_tokens =[
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass( cls: Any )-> Optional[Any]:
        cls.tokenizer : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
        cls.pad_token_id = 1
        return cls
def a__ ( self: Tuple )-> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50_003 )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def a__ ( self: Optional[int] )-> Tuple:
self.assertIn(__a , self.tokenizer.all_special_ids )
lowerCamelCase : List[str] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
lowerCamelCase : int = self.tokenizer.decode(__a , skip_special_tokens=__a )
lowerCamelCase : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : List[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , __a )
lowerCamelCase : Dict = 10
lowerCamelCase : str = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def a__ ( self: Optional[Any] )-> int:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50_004, 50_001] )
def a__ ( self: int )-> Dict:
lowerCamelCase : Optional[Any] = tempfile.mkdtemp()
lowerCamelCase : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
lowerCamelCase : str = PLBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def a__ ( self: Optional[Any] )-> Optional[Any]:
lowerCamelCase : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="""pt""" )
lowerCamelCase : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCamelCase : Tuple = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowerCamelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Dict = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="""pt""" )
lowerCamelCase : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="""pt""" )
lowerCamelCase : Optional[int] = targets["""input_ids"""]
lowerCamelCase : int = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a__ ( self: List[Any] )-> Any:
lowerCamelCase : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50_003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50_001,
} , )
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack( vl: list , wt: list , w: int , n: int ) -> float:
    # Greedy fractional knapsack: sort items by value/weight ratio, take whole
    # items while they fit, then a fraction of the first item that does not.
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
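# Worked example (added; inputs are assumptions): values [60, 100, 120],
# weights [10, 20, 30], capacity w=50, n=3. The ratios sort to 6, 5, 4; the
# first two items fit whole (weight 30) and 20/30 of the third is taken:
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0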
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def convert_weight_and_push( hidden_sizes: int , name: str , config: LevitConfig , save_directory: Path , push_to_hub: bool = True ) -> Dict:
    print(F'Converting {name}...' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""" , pretrained=True )
    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config ).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys() )
    new_keys = list(our_model.state_dict().keys() )
    print(len(og_keys ) , len(new_keys ) )
    # copy the timm weights over positionally, assuming matching key order
    for i in range(len(og_keys ) ):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights )
    x = torch.randn((2, 3, 224, 224) )
    out1 = from_model(x )
    out2 = our_model(x ).logits
    assert torch.allclose(out1 , out2 ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory: Path , model_name: str = None , push_to_hub: bool = True ) -> Optional[int]:
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
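# Example invocation (added; illustrative only, the script filename is a placeholder
# and the flags are the ones defined by the argparse setup above):
#   python convert_levit_checkpoint.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub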
| 42
| 1
|
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day( ) -> list:
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes( ) -> list:
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response )
| 42
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
    scheduler_classes =(KDPMaDiscreteScheduler,)
    num_inference_steps =10
    def get_scheduler_config( self: List[Any] , **kwargs )-> Union[str, Any]:
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
    def a__ ( self: Union[str, Any] )-> Any:
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def a__ ( self: str )-> int:
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def a__ ( self: int )-> Union[str, Any]:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def a__ ( self: List[Any] )-> List[Any]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
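
# A minimal sketch of the denoising-loop pattern these tests exercise: set the
# timesteps, scale the model input, predict, then step. The 0.1 * model_input
# line is a stand-in for a real UNet call so the sketch stays self-contained;
# any diffusers scheduler exposing this interface can be passed in.
import torch

def run_denoising_loop(scheduler, num_inference_steps: int = 10, shape=(1, 3, 8, 8)):
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(shape) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = 0.1 * model_input  # stand-in for model(model_input, t).sample
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample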
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase : int = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
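
# A standalone version of the seeding pattern in get_dummy_inputs above: MPS
# does not support device-local torch.Generator objects, so seeding falls back
# to the global torch.manual_seed there.
import torch

def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)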
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase :str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Tuple =KandinskyVaaControlnetImgaImgPipeline
snake_case__ : List[Any] =['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
snake_case__ : Dict =['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
snake_case__ : Optional[Any] =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
snake_case__ : List[Any] =False
@property
def a__ ( self: Tuple )-> Optional[int]:
return 32
@property
def a__ ( self: int )-> List[Any]:
return 32
@property
def a__ ( self: Optional[int] )-> List[Any]:
return self.time_input_dim
@property
def a__ ( self: Optional[int] )-> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self: Tuple )-> Any:
return 100
@property
def a__ ( self: Dict )-> int:
torch.manual_seed(0 )
lowerCamelCase : Tuple = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCamelCase : Optional[Any] = UNetaDConditionModel(**__a )
return model
@property
def a__ ( self: Optional[int] )-> Optional[Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ ( self: str )-> str:
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self: List[str] )-> int:
lowerCamelCase : str = self.dummy_unet
lowerCamelCase : Tuple = self.dummy_movq
lowerCamelCase : List[Any] = {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowerCamelCase : str = DDIMScheduler(**__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self: Tuple , __a: List[str] , __a: Optional[Any]=0 )-> int:
lowerCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__a )
# create init_image
lowerCamelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase : Tuple = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
lowerCamelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Optional[int] = torch.manual_seed(__a )
else:
lowerCamelCase : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Any = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self: Dict )-> Optional[int]:
lowerCamelCase : str = """cpu"""
lowerCamelCase : Optional[int] = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = self.pipeline_class(**__a )
lowerCamelCase : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(__a ) )
lowerCamelCase : Tuple = output.images
lowerCamelCase : List[Any] = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase : List[str] = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Union[str, Any] )-> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
lowerCamelCase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCamelCase : int = init_image.resize((512, 512) )
lowerCamelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
lowerCamelCase : Optional[Any] = torch.from_numpy(np.array(__a ) ).float() / 2_55.0
lowerCamelCase : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCamelCase : List[Any] = """A robot, 4k photo"""
lowerCamelCase : Dict = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__a )
lowerCamelCase : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
lowerCamelCase : Any = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCamelCase , lowerCamelCase : Optional[Any] = pipe_prior(
__a , image=__a , strength=0.85 , generator=__a , negative_prompt="""""" , ).to_tuple()
lowerCamelCase : List[Any] = pipeline(
image=__a , image_embeds=__a , negative_image_embeds=__a , hint=__a , generator=__a , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , )
lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__a , __a )
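
# A standalone version of the hint preprocessing in the slow test above: a PIL
# depth image becomes a (1, 3, H, W) float tensor scaled to [0, 1], which is
# the layout the controlnet img2img pipeline expects for `hint`.
import numpy as np
import torch
from PIL import Image

def prepare_hint(image: Image.Image) -> torch.Tensor:
    hint = torch.from_numpy(np.array(image)).float() / 255.0
    return hint.permute(2, 0, 1).unsqueeze(0)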
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
    def a__ ( self: str )-> str:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def a__ ( self: str )-> List[Any]:
        self.assertRaisesRegex(ValueError , """Weight can not be negative.""" )
    def a__ ( self: Any )-> Dict:
        self.assertRaisesRegex(ValueError , """Profit can not be negative.""" )
    def a__ ( self: Optional[Any] )-> List[Any]:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def a__ ( self: Optional[Any] )-> Tuple:
        self.assertRaisesRegex(
            ValueError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict =['''image_processor''', '''feature_extractor''']
snake_case__ : Dict ='''TvltImageProcessor'''
snake_case__ : Any ='''TvltFeatureExtractor'''
def __init__( self: Dict , __a: Dict , __a: List[Any] )-> Tuple:
super().__init__(image_processor=__a , feature_extractor=__a )
lowerCamelCase : Union[str, Any] = image_processor
lowerCamelCase : str = feature_extractor
def __call__( self: Tuple , __a: Dict=None , __a: Optional[int]=None , __a: Any=None , __a: Any=None , __a: Tuple=False , __a: Union[str, Any]=False , *__a: Optional[Any] , **__a: Any , )-> str:
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
lowerCamelCase : List[Any] = None
if images is not None:
lowerCamelCase : List[str] = self.image_processor(__a , mask_pixel=__a , *__a , **__a )
if images_mixed is not None:
lowerCamelCase : int = self.image_processor(__a , is_mixed=__a , *__a , **__a )
if audio is not None:
lowerCamelCase : Tuple = self.feature_extractor(
__a , *__a , sampling_rate=__a , mask_audio=__a , **__a )
lowerCamelCase : Dict = {}
if audio is not None:
output_dict.update(__a )
if images is not None:
output_dict.update(__a )
if images_mixed_dict is not None:
output_dict.update(__a )
return output_dict
@property
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : str = self.image_processor.model_input_names
lowerCamelCase : Optional[int] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
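
# The model_input_names property above merges the two components' input name
# lists while deduplicating and preserving order; dict.fromkeys is the
# idiomatic trick for that. A standalone illustration with made-up names:
names = list(dict.fromkeys(["pixel_values", "pixel_mask", "audio_values", "pixel_values"]))
assert names == ["pixel_values", "pixel_mask", "audio_values"]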
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
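
# A simplified sketch of the lazy-import machinery used above (not the real
# transformers._LazyModule): attribute access triggers the submodule import
# only on first use, so importing the package itself stays cheap.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")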
"""simple docstring"""
import logging
import os
from .state import PartialState
class A__ ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def a__ ( __a: Optional[Any] )-> int:
lowerCamelCase : int = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def a__ ( self: Dict , __a: Union[str, Any] , __a: Tuple , *__a: Tuple , **__a: Optional[int] )-> Any:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
lowerCamelCase : List[str] = kwargs.pop("""main_process_only""" , __a )
lowerCamelCase : int = kwargs.pop("""in_order""" , __a )
if self.isEnabledFor(__a ):
if self._should_log(__a ):
lowerCamelCase , lowerCamelCase : List[Any] = self.process(__a , __a )
self.logger.log(__a , __a , *__a , **__a )
elif in_order:
lowerCamelCase : Union[str, Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowerCamelCase , lowerCamelCase : Any = self.process(__a , __a )
self.logger.log(__a , __a , *__a , **__a )
state.wait_for_everyone()
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : str = None ) -> List[Any]:
if log_level is None:
lowerCamelCase : List[Any] = os.environ.get("""ACCELERATE_LOG_LEVEL""" , UpperCamelCase__ )
lowerCamelCase : str = logging.getLogger(UpperCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCamelCase__ , {} )
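
# A hedged usage sketch for the adapter above, via the upstream entry point
# accelerate.logging.get_logger (the factory at the end of this file mirrors
# it). Accelerator() initializes PartialState, which the adapter requires
# before any logging call; main_process_only and in_order are the two kwargs
# it pops off each call.
from accelerate import Accelerator
from accelerate.logging import get_logger

def demo_logging():
    accelerator = Accelerator()
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process")
    logger.info("printed on every process, in order", main_process_only=False, in_order=True)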
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
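
# The padded-size arithmetic from the hidden-states-with-padding test above,
# as a standalone helper: each spatial dimension is bumped past the next patch
# boundary. Note it adds a full extra patch when the size is already a
# multiple, exactly as the test computes it.
def pad_to_patch_boundary(size: int, patch: int) -> int:
    return size + patch - (size % patch)

assert pad_to_patch_boundary(30, 16) == 32
assert pad_to_patch_boundary(32, 16) == 48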
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase :Optional[int] = 16
__lowerCamelCase :Tuple = 32
def snake_case ( UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 , UpperCamelCase__ : str = "bert-base-cased" ) -> Dict:
lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Dict = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase : List[Any] = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=UpperCamelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(UpperCamelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCamelCase : List[str] = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
lowerCamelCase : List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> int:
model.eval()
lowerCamelCase : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase : int = model(**UpperCamelCase__ )
lowerCamelCase : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCamelCase , lowerCamelCase : List[str] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCamelCase__ ) - 1:
lowerCamelCase : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCamelCase : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
lowerCamelCase : Tuple = metric.compute()
return eval_metric["accuracy"]
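# Hypothetical mini-demo (not part of the original script) of the trimming above: with
# 10 eval samples sharded over 2 processes at a per-process batch size of 4, the
# distributed sampler pads the dataset to 12, so the final gather carries two
# duplicated samples that must be dropped before they reach the metric.
def _dedup_last_batch_demo():
    dataset_len = 10
    samples_seen = 8  # accounted for by all earlier (full) batches
    last_gathered = [8, 9, 0, 1]  # two real samples plus two wrapped-around duplicates
    return last_gathered[: dataset_len - samples_seen]  # -> [8, 9]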
def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
# Initialize accelerator
lowerCamelCase : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase : str = config["""lr"""]
lowerCamelCase : Any = int(config["""num_epochs"""] )
lowerCamelCase : int = int(config["""seed"""] )
lowerCamelCase : Optional[Any] = int(config["""batch_size"""] )
lowerCamelCase : Tuple = args.model_name_or_path
set_seed(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[Any] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ )
# Instantiate optimizer
lowerCamelCase : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
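# DummyOptim is a placeholder: when the DeepSpeed config already defines an optimizer, DeepSpeed
# builds the real one itself (the DummyScheduler below plays the same role for the scheduler)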
lowerCamelCase : Dict = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCamelCase : Any = 1
lowerCamelCase : List[str] = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , )
else:
lowerCamelCase : str = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase : Optional[int] = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCamelCase : int = 0
lowerCamelCase : int = evaluate.load("""glue""" , """mrpc""" )
lowerCamelCase : int = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase : Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase : str = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCamelCase : Optional[int] = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase : List[str] = int(UpperCamelCase__ ) + 1
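# e.g. resume_from_checkpoint="out/epoch_3" yields state_epoch_num == "3", so training resumes at epoch 4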
lowerCamelCase : Any = evaluation_loop(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.print("""resumed checkpoint performance:""" , UpperCamelCase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , """r""" ) as f:
lowerCamelCase : Dict = json.load(UpperCamelCase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase : int = {}
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
lowerCamelCase : Tuple = model(**UpperCamelCase__ )
lowerCamelCase : List[str] = outputs.loss
lowerCamelCase : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase : List[Any] = F'epoch_{epoch}'
lowerCamelCase : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
lowerCamelCase : List[Any] = evaluation_loop(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Any = accuracy
lowerCamelCase : Optional[Any] = lr_scheduler.get_lr()[0]
lowerCamelCase : Union[str, Any] = optimizer.param_groups[0]["""lr"""]
lowerCamelCase : Tuple = epoch
lowerCamelCase : Optional[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( ) -> Any:
lowerCamelCase : str = argparse.ArgumentParser(description="""Simple example of a training script that saves, resumes from and verifies checkpoints.""" )
parser.add_argument(
"""--model_name_or_path""" , type=UpperCamelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCamelCase__ , )
parser.add_argument(
"""--output_dir""" , type=UpperCamelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=UpperCamelCase__ , default=2 , help="""Number of train epochs.""" , )
lowerCamelCase : Optional[int] = parser.parse_args()
lowerCamelCase : str = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 42
|
"""simple docstring"""
import os
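# Project Euler problem 11: find the greatest product of four adjacent numbers
# (right, down or diagonally) in the 20x20 grid stored next to this file in grid.txt.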
def snake_case ( ) -> Optional[Any]:
with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(x ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
| 42
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class A__ ( datasets.BuilderConfig):
"""simple docstring"""
snake_case__ : Optional[datasets.Features] =None
class A__ ( datasets.ArrowBasedBuilder):
"""simple docstring"""
snake_case__ : Optional[int] =PandasConfig
def a__ ( self: Tuple )-> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def a__ ( self: Optional[int] , __a: Optional[int] )-> str:
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCamelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__a , (str, list, tuple) ):
lowerCamelCase : Any = data_files
if isinstance(__a , __a ):
lowerCamelCase : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase : Any = [dl_manager.iter_files(__a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
lowerCamelCase : List[Any] = []
for split_name, files in data_files.items():
if isinstance(__a , __a ):
lowerCamelCase : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase : Dict = [dl_manager.iter_files(__a ) for file in files]
splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"""files""": files} ) )
return splits
def a__ ( self: Optional[Any] , __a: pa.Table )-> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase : Dict = table_cast(__a , self.config.features.arrow_schema )
return pa_table
def a__ ( self: List[str] , __a: Any )-> Dict:
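# Each data file is expected to hold a pickled pandas DataFrame; it is read back,
# converted to an Arrow table and cast to the requested features.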
for i, file in enumerate(itertools.chain.from_iterable(__a ) ):
with open(__a , """rb""" ) as f:
lowerCamelCase : Union[str, Any] = pa.Table.from_pandas(pd.read_pickle(__a ) )
yield i, self._cast_table(__a )
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
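# token indices 5 and 7 correspond to "elephant" and "glasses" in the prompt above; these are
# the tokens whose cross-attention Attend-and-Excite strengthens during denoising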
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 1
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase :Optional[Any] = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[int] ='''xlm-prophetnet'''
snake_case__ : Any =['''past_key_values''']
snake_case__ : Dict ={
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self: List[str] , __a: Optional[float] = 0.1 , __a: Optional[Union[str, Callable]] = "gelu" , __a: Optional[int] = 30_522 , __a: Optional[int] = 1_024 , __a: Optional[int] = 4_096 , __a: Optional[int] = 12 , __a: Optional[int] = 16 , __a: Optional[int] = 4_096 , __a: Optional[int] = 12 , __a: Optional[int] = 16 , __a: Optional[float] = 0.1 , __a: Optional[float] = 0.1 , __a: Optional[int] = 512 , __a: Optional[float] = 0.02 , __a: Optional[bool] = True , __a: Optional[bool] = True , __a: Optional[int] = 0 , __a: Optional[int] = 2 , __a: Optional[int] = 32 , __a: Optional[int] = 128 , __a: Optional[bool] = False , __a: Optional[float] = 0.0 , __a: Optional[bool] = True , __a: Optional[int] = 0 , __a: Optional[int] = 1 , __a: Optional[int] = 2 , **__a: Dict , )-> int:
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = encoder_ffn_dim
lowerCamelCase : int = num_encoder_layers
lowerCamelCase : Any = num_encoder_attention_heads
lowerCamelCase : Dict = decoder_ffn_dim
lowerCamelCase : Dict = num_decoder_layers
lowerCamelCase : Dict = num_decoder_attention_heads
lowerCamelCase : Optional[Any] = max_position_embeddings
lowerCamelCase : Union[str, Any] = init_std # Normal(0, this parameter)
lowerCamelCase : Optional[Any] = activation_function
# parameters for xlmprophetnet
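# ngram: how many future tokens ProphetNet predicts at once via its n-stream self-attention;
# num_buckets and relative_max_distance parameterize the relative position buckets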
lowerCamelCase : Dict = ngram
lowerCamelCase : Tuple = num_buckets
lowerCamelCase : Optional[int] = relative_max_distance
lowerCamelCase : Tuple = disable_ngram_loss
lowerCamelCase : List[str] = eps
# 3 Types of Dropout
lowerCamelCase : Optional[int] = attention_dropout
lowerCamelCase : str = activation_dropout
lowerCamelCase : Tuple = dropout
lowerCamelCase : Optional[int] = use_cache
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , add_cross_attention=__a , decoder_start_token_id=__a , **__a , )
@property
def a__ ( self: Optional[Any] )-> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def a__ ( self: List[str] , __a: Any )-> Union[str, Any]:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
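# ESM reuses RoBERTa's scheme: position ids count up from padding_idx + 1, while padding
# tokens keep position padding_idx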
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42
| 1
|
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase :int = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] ='''data2vec-audio'''
def __init__( self: Dict , __a: Tuple=32 , __a: str=768 , __a: str=12 , __a: Tuple=12 , __a: Union[str, Any]=3_072 , __a: Union[str, Any]="gelu" , __a: str=0.1 , __a: Optional[Any]=0.1 , __a: Any=0.1 , __a: str=0.0 , __a: Union[str, Any]=0.1 , __a: Optional[Any]=0.1 , __a: Optional[int]=0.02 , __a: Union[str, Any]=1e-5 , __a: Tuple="gelu" , __a: Tuple=(512, 512, 512, 512, 512, 512, 512) , __a: Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __a: List[Any]=(10, 3, 3, 3, 3, 2, 2) , __a: Optional[Any]=False , __a: Optional[Any]=16 , __a: Dict=19 , __a: Optional[Any]=5 , __a: List[str]=0.05 , __a: Union[str, Any]=10 , __a: Tuple=2 , __a: int=0.0 , __a: Union[str, Any]=10 , __a: str=0 , __a: Union[str, Any]="sum" , __a: Union[str, Any]=False , __a: Dict=False , __a: Optional[int]=256 , __a: Any=(512, 512, 512, 512, 1_500) , __a: Optional[int]=(5, 3, 3, 1, 1) , __a: List[str]=(1, 2, 3, 1, 1) , __a: List[str]=512 , __a: Union[str, Any]=0 , __a: Dict=1 , __a: Union[str, Any]=2 , __a: Tuple=False , __a: str=3 , __a: Dict=2 , __a: Optional[Any]=3 , __a: Optional[Any]=None , **__a: str , )-> int:
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : List[str] = feat_extract_activation
lowerCamelCase : Dict = list(__a )
lowerCamelCase : Tuple = list(__a )
lowerCamelCase : Optional[Any] = list(__a )
lowerCamelCase : List[Any] = conv_bias
lowerCamelCase : Tuple = num_conv_pos_embeddings
lowerCamelCase : List[str] = num_conv_pos_embedding_groups
lowerCamelCase : Any = conv_pos_kernel_size
lowerCamelCase : int = len(self.conv_dim )
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : str = hidden_act
lowerCamelCase : Optional[Any] = num_attention_heads
lowerCamelCase : Union[str, Any] = hidden_dropout
lowerCamelCase : int = attention_dropout
lowerCamelCase : int = activation_dropout
lowerCamelCase : Optional[Any] = feat_proj_dropout
lowerCamelCase : Union[str, Any] = final_dropout
lowerCamelCase : Union[str, Any] = layerdrop
lowerCamelCase : Union[str, Any] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase : Dict = mask_time_prob
lowerCamelCase : str = mask_time_length
lowerCamelCase : Tuple = mask_time_min_masks
lowerCamelCase : List[Any] = mask_feature_prob
lowerCamelCase : Union[str, Any] = mask_feature_length
lowerCamelCase : int = mask_feature_min_masks
# ctc loss
lowerCamelCase : List[str] = ctc_loss_reduction
lowerCamelCase : Optional[int] = ctc_zero_infinity
# adapter
lowerCamelCase : Any = add_adapter
lowerCamelCase : List[str] = adapter_kernel_size
lowerCamelCase : List[str] = adapter_stride
lowerCamelCase : List[Any] = num_adapter_layers
lowerCamelCase : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase : Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase : List[str] = list(__a )
lowerCamelCase : Dict = list(__a )
lowerCamelCase : int = list(__a )
lowerCamelCase : Any = xvector_output_dim
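# the property below gives the overall stride of the convolutional feature encoder: the
# product of all conv strides, i.e. how many raw audio samples map to one encoder frame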
@property
def a__ ( self: Tuple )-> List[Any]:
return math.prod(self.conv_stride )
| 42
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
| 42
|
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
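# Kosaraju's two-pass algorithm: one DFS records vertices in finish order, then a second
# DFS over the reversed graph, taken in reverse finish order, peels off one strongly
# connected component per tree (e.g. {0, 1, 2}, {3} and {4} for the first graph above).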
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
| 42
| 1
|
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case ( UpperCamelCase__ : List[str] , UpperCamelCase__ : str ) -> Union[str, Any]:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def snake_case ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> List[Any]:
lowerCamelCase : Optional[Any] = tmp_path / """cache"""
lowerCamelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase : List[str] = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> List[str]:
lowerCamelCase : Dict = tmp_path / """cache"""
lowerCamelCase : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowerCamelCase : Any = features.copy() if features else default_expected_features
lowerCamelCase : List[str] = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase : Optional[int] = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
lowerCamelCase : List[str] = tmp_path / """cache"""
lowerCamelCase : Any = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
lowerCamelCase : List[Any] = features.copy() if features else default_expected_features
lowerCamelCase : Optional[Any] = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase : str = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Any:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowerCamelCase : List[str] = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
lowerCamelCase : Union[str, Any] = features.copy()
lowerCamelCase : Optional[Any] = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase : Any = tmp_path / """cache"""
lowerCamelCase : Optional[int] = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Any:
lowerCamelCase : Dict = tmp_path / """cache"""
lowerCamelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowerCamelCase : Tuple = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> int:
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Dict = jsonl_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Optional[Any] = [jsonl_path]
lowerCamelCase : Optional[Any] = tmp_path / """cache"""
lowerCamelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowerCamelCase : Optional[int] = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str]=("train",) ) -> Optional[int]:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
lowerCamelCase : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def snake_case ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : int ) -> List[str]:
lowerCamelCase : Optional[int] = tmp_path / """cache"""
lowerCamelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase : Dict = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Optional[Any]:
lowerCamelCase : Dict = tmp_path / """cache"""
lowerCamelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowerCamelCase : str = features.copy() if features else default_expected_features
lowerCamelCase : List[str] = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase : List[Any] = JsonDatasetReader({"""train""": jsonl_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ) -> List[Any]:
if split:
lowerCamelCase : Tuple = {split: jsonl_path}
else:
lowerCamelCase : List[Any] = """train"""
lowerCamelCase : List[Any] = {"""train""": jsonl_path, """test""": jsonl_path}
lowerCamelCase : Union[str, Any] = tmp_path / """cache"""
lowerCamelCase : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowerCamelCase : Union[str, Any] = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def snake_case ( UpperCamelCase__ : List[str] ) -> str:
return json.load(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : List[Any] ) -> Optional[Any]:
return [json.loads(line ) for line in buffer]
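# The "orient" values exercised below follow the pandas JSON layouts: "records" is a list of
# row dicts, "split" and "table" wrap columns/data (or schema/data) in a dict, "index" and
# "columns" nest by row or column key, and "values" is a bare list of rows.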
class A__ :
"""simple docstring"""
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def a__ ( self: List[str] , __a: str , __a: List[str] , __a: str )-> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a ).write()
buffer.seek(0 )
lowerCamelCase : Tuple = load_json_function(__a )
assert isinstance(__a , __a )
assert isinstance(exported_content[0] , __a )
assert len(__a ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def a__ ( self: Optional[int] , __a: Tuple , __a: Any , __a: List[str] , __a: Tuple , __a: Dict )-> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , orient=__a ).write()
buffer.seek(0 )
lowerCamelCase : Dict = load_json(__a )
assert isinstance(__a , __a )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__a , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__a ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def a__ ( self: Union[str, Any] , __a: List[Any] , __a: Any , __a: Dict )-> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase : str = load_json_function(__a )
assert isinstance(__a , __a )
assert isinstance(exported_content[0] , __a )
assert len(__a ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def a__ ( self: str , __a: Any , __a: int , __a: Tuple , __a: List[Any] , __a: Union[str, Any] )-> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , orient=__a , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase : Tuple = load_json(__a )
assert isinstance(__a , __a )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__a , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__a ) == 10
def a__ ( self: Optional[int] , __a: List[Any] )-> List[str]:
with pytest.raises(__a ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def a__ ( self: Tuple , __a: Tuple , __a: Union[str, Any] , __a: Union[str, Any] , __a: Any , __a: Optional[int] )-> Optional[int]:
lowerCamelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / f'test.json.{extension}'
lowerCamelCase : Tuple = str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(__a , __a , compression=__a ).write()
with fsspec.open(__a , """rb""" , compression="""infer""" ) as f:
lowerCamelCase : int = f.read()
with fsspec.open(__a , """rb""" , compression="""infer""" ) as f:
lowerCamelCase : List[str] = f.read()
assert exported_content == original_content
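# A minimal sketch of the round trip the tests above exercise, written against
# the public Dataset.to_json API (the example dataset here is assumed, not taken
# from the test fixtures):
import io
import json
from datasets import Dataset
_ds = Dataset.from_dict({"id": list(range(10))})
with io.BytesIO() as _buf:
    _ds.to_json(_buf, lines=True)  # one JSON object per line
    _buf.seek(0)
    _rows = [json.loads(line) for line in _buf]
assert len(_rows) == 10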
| 42
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(cardinality ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(embedding_dimension ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(lags_sequence ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
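# Worked example (derived from the defaults above, not stated in the source):
# with input_size=1, lags_sequence of length 7, and no time/static/dynamic
# features, _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, so the model's
# feature size is input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.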
| 42
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any ) -> Tuple:
# Construct model
if openai_config_file == "":
lowerCamelCase : Optional[Any] = OpenAIGPTConfig()
else:
lowerCamelCase : List[Any] = OpenAIGPTConfig.from_json_file(UpperCamelCase__ )
lowerCamelCase : Dict = OpenAIGPTModel(UpperCamelCase__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
lowerCamelCase : List[str] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__lowerCamelCase :str = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
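# Example invocation (script name and paths are assumed, not from the source):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch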
| 42
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]:
lowerCamelCase : int = 1
lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(RADIX )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Any = int((i / placement) % RADIX )
buckets[tmp].append(i )
# put each bucket's contents into list_of_ints
lowerCamelCase : Dict = 0
for b in range(RADIX ):
for i in buckets[b]:
lowerCamelCase : List[str] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
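# Self-contained sketch of a single bucket pass (illustrative; the helper name
# and the sample input are assumed, not from the source): distribute numbers by
# the digit selected by `placement`, then read the buckets back in order.
def _one_radix_pass(nums: list[int], placement: int, radix: int = 10) -> list[int]:
    buckets: list[list[int]] = [[] for _ in range(radix)]
    for n in nums:
        buckets[(n // placement) % radix].append(n)
    return [n for bucket in buckets for n in bucket]
assert _one_radix_pass([170, 45, 75, 90, 802, 24, 2, 66], 1) == [170, 90, 802, 2, 24, 45, 75, 66]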
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
| 1
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :str = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
__lowerCamelCase :Tuple = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__lowerCamelCase :Dict = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__lowerCamelCase :List[Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
__lowerCamelCase :Dict = OrderedDict(
[
# Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
__lowerCamelCase :List[str] = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
__lowerCamelCase :Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
__lowerCamelCase :Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
__lowerCamelCase :Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
__lowerCamelCase :Optional[int] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
__lowerCamelCase :Union[str, Any] = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
__lowerCamelCase :Optional[Any] = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
__lowerCamelCase :List[Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
__lowerCamelCase :Union[str, Any] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
__lowerCamelCase :Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__lowerCamelCase :List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__lowerCamelCase :List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__lowerCamelCase :Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__lowerCamelCase :Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase :Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__lowerCamelCase :Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__lowerCamelCase :Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase :Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__lowerCamelCase :Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase :Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__lowerCamelCase :List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__lowerCamelCase :Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__lowerCamelCase :List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : Optional[Any] =FLAX_MODEL_MAPPING
__lowerCamelCase :Dict = auto_class_update(FlaxAutoModel)
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : List[Any] =FLAX_MODEL_FOR_PRETRAINING_MAPPING
__lowerCamelCase :Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : List[Any] =FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__lowerCamelCase :List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : Optional[int] =FLAX_MODEL_FOR_MASKED_LM_MAPPING
__lowerCamelCase :Any = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : Optional[Any] =FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowerCamelCase :List[Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : int =FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowerCamelCase :Optional[int] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : Dict =FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__lowerCamelCase :Tuple = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : List[Any] =FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__lowerCamelCase :List[Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : Optional[Any] =FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__lowerCamelCase :Dict = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : Dict =FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__lowerCamelCase :List[str] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : List[Any] =FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCamelCase :Any = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : Dict =FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__lowerCamelCase :Any = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A__ ( _BaseAutoModelClass):
"""simple docstring"""
snake_case__ : List[str] =FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__lowerCamelCase :Any = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
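# Illustrative usage of the auto classes defined above (a sketch; assumes Flax
# weights exist for the chosen checkpoint — the mapping dispatches on the
# config type of the checkpoint):
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")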
| 42
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(attn_weights ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weights
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(intermediate_weights ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , tuple ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
trax_layer_weights ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
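# Example invocation (script name and paths are assumed, not from the source):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin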
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCamelCase :int = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__lowerCamelCase :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
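# Note (behavioural summary, not from the source): the _LazyModule indirection
# above defers the torch/TF submodule imports until one of the listed classes
# is first accessed, so importing the package stays cheap when neither
# framework is installed or needed.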
| 42
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNorm1d(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
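# Minimal sketch of the hook API the tests above exercise (device ids assumed;
# not part of the test suite): each hook moves its submodule's weights to the
# execution device just-in-time for the forward pass.
#
#   from accelerate.hooks import AlignDevicesHook, add_hook_to_module
#   add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
#   add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))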
| 42
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : torch.FloatTensor
snake_case__ : torch.FloatTensor
snake_case__ : Optional[torch.FloatTensor] =None
class A__ ( __lowercase , __lowercase):
"""simple docstring"""
snake_case__ : Any =2
@register_to_config
def __init__( self: Any , __a: float = 0.02 , __a: float = 100 , __a: float = 1.0_07 , __a: float = 80 , __a: float = 0.05 , __a: float = 50 , )-> Optional[int]:
# standard deviation of the initial noise distribution
lowerCamelCase : Tuple = sigma_max
# setable values
lowerCamelCase : int = None
lowerCamelCase : np.IntTensor = None
lowerCamelCase : torch.FloatTensor = None # sigma(t_i)
def a__ ( self: List[str] , __a: torch.FloatTensor , __a: Optional[int] = None )-> torch.FloatTensor:
return sample
def a__ ( self: Tuple , __a: int , __a: Union[str, torch.device] = None )-> Any:
lowerCamelCase : Optional[Any] = num_inference_steps
lowerCamelCase : Union[str, Any] = np.arange(0 , self.num_inference_steps )[::-1].copy()
lowerCamelCase : Dict = torch.from_numpy(__a ).to(__a )
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowerCamelCase : List[Any] = torch.tensor(__a , dtype=torch.float32 , device=__a )
def a__ ( self: int , __a: torch.FloatTensor , __a: float , __a: Optional[torch.Generator] = None )-> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Optional[Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : str = self.config.s_noise * randn_tensor(sample.shape , generator=__a ).to(sample.device )
lowerCamelCase : str = sigma + gamma * sigma
lowerCamelCase : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def a__ ( self: Optional[Any] , __a: torch.FloatTensor , __a: float , __a: float , __a: torch.FloatTensor , __a: bool = True , )-> Union[KarrasVeOutput, Tuple]:
lowerCamelCase : List[str] = sample_hat + sigma_hat * model_output
lowerCamelCase : Tuple = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a , derivative=__a , pred_original_sample=__a )
def a__ ( self: Dict , __a: torch.FloatTensor , __a: float , __a: float , __a: torch.FloatTensor , __a: torch.FloatTensor , __a: torch.FloatTensor , __a: bool = True , )-> Union[KarrasVeOutput, Tuple]:
lowerCamelCase : List[str] = sample_prev + sigma_prev * model_output
lowerCamelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a , derivative=__a , pred_original_sample=__a )
def a__ ( self: Dict , __a: Dict , __a: Dict , __a: Union[str, Any] )-> str:
raise NotImplementedError()
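# Illustrative sampling loop for this scheduler (a sketch; the model call,
# tensor shapes, and attribute names are assumed, not from the source). The
# churn step perturbs the sample to (sample_hat, sigma_hat); the Euler step
# then integrates down to sigma_prev:
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = scheduler.init_noise_sigma * randn_tensor(shape, generator=generator)
#   for sigma, sigma_prev in zip(scheduler.schedule[:-1], scheduler.schedule[1:]):
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator)
#       model_output = model(sample_hat, sigma_hat).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample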
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
# fmt: off
__lowerCamelCase :Optional[Any] = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCamelCase :Tuple = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case ( UpperCamelCase__ : str ) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def snake_case ( UpperCamelCase__ : str ) -> str:
return "".join(REVERSE_DICT[char] for char in message.split() )
def snake_case ( ) -> None:
lowerCamelCase : int = """Morse code here!"""
print(UpperCamelCase__ )
lowerCamelCase : List[Any] = encrypt(UpperCamelCase__ )
print(UpperCamelCase__ )
lowerCamelCase : Any = decrypt(UpperCamelCase__ )
print(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 42
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
        model = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
        self.model_tester = ConvNextModelTester(self )
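def _example_backbone_usage():
    # Illustrative sketch, not part of the original test suite: how a ConvNext
    # backbone like the one exercised above is typically used. The checkpoint
    # and stage name below are assumptions for illustration only.
    import torch
    from transformers import ConvNextBackbone

    backbone = ConvNextBackbone.from_pretrained("facebook/convnext-tiny-224" , out_features=["stage4"] )
    with torch.no_grad():
        feature_maps = backbone(torch.randn(1, 3, 224, 224) ).feature_maps
    # One feature map per requested stage, shaped (batch, channels, height, width).
    print(feature_maps[-1].shape )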
| 42
| 1
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(""" """ )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
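# Example invocation (illustrative; the flags come from TensorFlowBenchmarkArguments
# and the script file name is an assumption, not fixed by this file):
#
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128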
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
    def __init__( self: Union[str, Any] , vocab_size=30_522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3_072 , hidden_act="""gelu_new""" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13_353_718 , searcher_beam_size=5_000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , )-> Any:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
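if __name__ == "__main__":
    # Illustrative sketch, not in the original file: the class above follows the
    # usual PretrainedConfig pattern, so any field can be overridden at
    # construction time. The values below are arbitrary examples.
    config = A__(num_candidates=4 , searcher_beam_size=1_000 )
    print(config.num_candidates , config.reader_beam_size )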
| 42
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( Dataset):
"""simple docstring"""
    def __init__( self: Tuple , params , data )-> List[Any]:
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self: Any , __a: int )-> List[Any]:
return (self.token_ids[index], self.lengths[index])
def __len__( self: List[Any] )-> Union[str, Any]:
return len(self.lengths )
    def check( self: str )-> List[str]:
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self: int )-> Dict:
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices )} too long sequences.' )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self: Dict )-> Optional[Any]:
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
    def remove_unknown_sequences( self: List[Any] )-> int:
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
    def print_statistics( self: List[Any] )-> Optional[int]:
if not self.params.is_master:
return
logger.info(f'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self: List[str] , batch: Optional[int] )-> Dict:
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
return tk_t, lg_t
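def _example_dataloader(dataset ):
    # Illustrative sketch, not in the original file: the dataset above yields
    # (token_ids, length) pairs, and batch_sequences pads a list of such pairs
    # into rectangular tensors, so it slots in directly as a DataLoader collate_fn.
    from torch.utils.data import DataLoader

    loader = DataLoader(dataset , batch_size=32 , shuffle=True , collate_fn=dataset.batch_sequences )
    token_ids, lengths = next(iter(loader ) )  # (bs, max_seq_len_), (bs)
    return token_ids, lengths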
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
    def __init__( self: Dict , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="""gelu""" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , )-> Dict:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
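if __name__ == "__main__":
    # Illustrative sketch, not in the original file: instantiating the config
    # above with defaults plus one override. The depths value is an arbitrary
    # example, not a recommended setting.
    config = A__(depths=[3, 3, 9, 3] )
    print(config.num_encoder_blocks , config.hidden_sizes )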
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
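# Illustrative note, not in the original file: with the _LazyModule pattern above,
# the module body only records names in _import_structure; the torch-backed
# submodules are imported on first attribute access. Assuming this file is
# installed as transformers.models.nllb_moe, the access pattern is simply:
#
#   from transformers.models.nllb_moe import NllbMoeConfig  # real import happens here, lazily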
| 42
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal( u: float , p: int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("""enter the numbers of values: """ ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
    summ = 0
    print("""enter the values of parameters in a list: """ )
    x = list(map(int , input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
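# Illustrative non-interactive sketch of the same routine (not in the original
# file; names here are hypothetical). For y = x**2 sampled at x = 0..3,
# interpolating at 1.5 recovers 1.5**2 = 2.25 exactly, since the third forward
# difference of a quadratic vanishes.
def _forward_interpolate(xs: list[float] , ys: list[float] , value: float ) -> float:
    n = len(xs )
    table = [[0.0] * n for _ in range(n )]
    for i in range(n ):
        table[i][0] = ys[i]
    # build the forward difference table column by column
    for col in range(1 , n ):
        for row in range(n - col ):
            table[row][col] = table[row + 1][col - 1] - table[row][col - 1]
    u = (value - xs[0]) / (xs[1] - xs[0])
    total, term = table[0][0], 1.0
    for col in range(1 , n ):
        term *= (u - (col - 1)) / col  # accumulates u(u-1)...(u-col+1)/col!
        total += term * table[0][col]
    return total
# _forward_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 1.5) -> 2.25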
if __name__ == "__main__":
main()
| 42
| 1
|
"""simple docstring"""
import os
import string
import sys
__lowerCamelCase :Dict = 1 << 8
__lowerCamelCase :Any = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
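def _example_read_key():
    # Illustrative sketch, not in the original file: read one key press with the
    # helpers above (requires a real terminal). Arrow keys come back offset by
    # ARROW_KEY_FLAG, so they can be distinguished from printable characters.
    key = get_character()
    if isinstance(key , str ) and ord(key ) == KEYMAP["up"]:
        return "up-arrow"
    return key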
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
"""simple docstring"""
    def __init__( self: Optional[int] , device: str = "cpu" , clip_model: str = "openai/clip-vit-large-patch14" )-> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
        self.image_std = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self: Tuple , images )-> List[str]:
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self: int , text=None , images=None , **kwargs )-> Tuple:
        encoding = self.tokenizer(text=text , **kwargs )
        encoding["""pixel_values"""] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class A__ ( nn.Module):
"""simple docstring"""
    def __init__( self: List[str] , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="""image""" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , )-> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation( self: int , input_path=None , output_path=None , total_duration=5 , extend_frames=True )-> List[Any]:
        images = []
        if output_path is None:
            output_path = """./animation.gif"""
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + """/*""" ) )
        if not len(paths ):
            raise ValueError(
                """No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
                """ function?)""" )
        if len(paths ) == 1:
            print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(""".png""" ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=durations )
print(f'gif saved to {output_path}' )
    def _get_latent( self: Optional[Any] , path=None , img=None )-> Optional[int]:
        if not (path or img):
            raise ValueError("""Input either path or tensor""" )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z, *_ = self.vqgan.encode(img )
        return z
    def _add_vector( self: Optional[Any] , transform_vector )-> Dict:
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self: str , prompts , img , weights=None )-> Optional[Any]:
        clip_inputs = self.clip_preprocessor(text=prompts , images=img , return_tensors="""pt""" , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self: Any , pos_prompts , neg_prompts , img )-> str:
        pos_logits = self._get_clip_similarity(pos_prompts["""prompts"""] , img , weights=(1 / pos_prompts["""weights"""]) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["""prompts"""] , img , weights=neg_prompts["""weights"""] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self: Optional[int] , original_img , pos_prompts , neg_prompts )-> int:
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print("""CLIP loss""" , clip_loss )
            if self.log:
                wandb.log({"""CLIP Loss""": clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self: Optional[Any] , positive_prompts , negative_prompts , image_path )-> List[Any]:
        wandb.init(reinit=True , project="""face-editor""" )
        wandb.config.update({"""Positive Prompts""": positive_prompts} )
        wandb.config.update({"""Negative Prompts""": negative_prompts} )
        wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log("""Original Image""" , wandb.Image(image ) )
    def process_prompts( self: str , prompts )-> Dict:
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split("""|""" )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(""":""" )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
    def generate( self: List[Any] , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , )-> Union[str, Any]:
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + """_""" + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print("""Original Image""" )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}.png' ) )
            if self.log:
                wandb.log({"""Image""": wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}_final.png' ) )
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def convert_weight_and_push( hidden_sizes: int , name: str , config: LevitConfig , save_directory: Path , push_to_hub: bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""" , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        out1 = from_model(x )
        out2 = our_model(x ).logits
        assert torch.allclose(out1 , out2 ), "The model logits don't match the original one."
        checkpoint_name = name
        print(checkpoint_name )
        if push_to_hub:
            our_model.save_pretrained(save_directory / checkpoint_name )
            image_processor = LevitImageProcessor()
            image_processor.save_pretrained(save_directory / checkpoint_name )
            print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory: Path , model_name: str = None , push_to_hub: bool = True ) -> Optional[int]:
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
    names_to_config = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
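# Example invocation (illustrative; the script file name is an assumption, the
# flags match the argparse definitions above):
#
#   python convert_levit_to_pytorch.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub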
| 42
| 1
|
"""simple docstring"""
import string
from math import log10
def term_frequency( term: str , document: str ) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
    tokenize_document = document_without_punctuation.split(""" """ )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term: str , corpus: str ) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("""""" , """""" , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("""\n""" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df: int , n: int , smoothing=False ) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("""df must be > 0""" )
    elif n == 0:
        raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) , 3 )
def tf_idf( tf: int , idf: int ) -> float:
    return round(tf * idf , 3 )
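def _example_tf_idf() -> float:
    # Illustrative worked example, not in the original file, tying the helpers
    # together on a tiny two-document corpus (one document per line).
    corpus = """the cat sat\nthe dog sat"""
    tf = term_frequency("""sat""" , """the cat sat""" )  # -> 1
    df, n = document_frequency("""sat""" , corpus )  # -> (2, 2)
    idf = inverse_document_frequency(df , n )  # log10(2/2) = 0.0
    return tf_idf(tf , idf )  # -> 0.0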
| 42
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
snake_case__ : Tuple =10
    def a__ ( self: List[Any] , **kwargs )-> Union[str, Any]:
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 42
| 1
|
"""simple docstring"""
import math
def real_power( apparent_power: float , power_factor: float ) -> float:
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * power_factor
def reactive_power( apparent_power: float , power_factor: float ) -> float:
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
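# Illustrative worked example, not in the original file: a 100 VA load at power
# factor 0.8 splits into 80 W of real power and 60 var of reactive power, since
# sqrt(1 - 0.8**2) = 0.6.
#
#   real_power(100, 0.8)      # -> 80.0
#   reactive_power(100, 0.8)  # -> 60.0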
| 42
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def a__ ( self: Any , device , seed=0 )-> Union[str, Any]:
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def a__ ( self: Union[str, Any] , device , generator_device="cpu" , dtype=torch.floataa , seed=0 )-> Optional[Any]:
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
        pipe = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 1
|
"""simple docstring"""
def move_tower( height , from_pole , to_pole , with_pole ) -> List[str]:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( fp , tp ) -> Tuple:
    print("""moving disk from""" , fp , """to""" , tp )
def main() -> Dict:
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
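# Illustrative note, not in the original file: moving a tower of height n takes
# 2**n - 1 disk moves. For example:
#
#   move_tower(2, "A", "B", "C")
#   # moving disk from A to C
#   # moving disk from A to B
#   # moving disk from C to B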
| 42
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """tf""" , 12 , **model_kwargs )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """pt""" , 12 , **model_kwargs )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
        vocab = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , """pt""" , 12 , tokenizer )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """tf""" , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """pt""" , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
    def a__ ( self: List[Any] , model , framework , opset , tokenizer=None , **model_kwargs )-> Any:
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model , tokenizer , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model , tokenizer , """tf""" )
    def a__ ( self: List[str] , model , tokenizer , framework )-> List[Any]:
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
    def a__ ( self: List[Any] )-> int:
        input_names = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
        generated = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
def solve_maze( maze: list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("""\n""".join(str(row ) for row in solutions ) )
    else:
        print("""No solution exists!""" )
    return solved
def run_maze( maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
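# Illustrative worked example, not in the original file: 0 marks a free cell and
# 1 a blocked one; the solver prints the visited path as a second 0/1 grid.
#
#   solve_maze([[0, 1], [0, 0]])
#   # [1, 0]
#   # [1, 1]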
| 42
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
    def a__ ( self: str )-> str:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def a__ ( self: str )-> List[Any]:
        self.assertRaisesRegex(ValueError , """Weight can not be negative.""" )
    def a__ ( self: Any )-> Dict:
        self.assertRaisesRegex(ValueError , """Profit can not be negative.""" )
    def a__ ( self: Optional[Any] )-> List[Any]:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
    def a__ ( self: Optional[Any] )-> Tuple:
        self.assertRaisesRegex(
            ValueError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 42
| 1
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Optional[Any]:
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
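# Shape sketch (using the tester defaults below: 2 layers, 4 attention heads): each of
# head_mask, decoder_head_mask and cross_attn_head_mask defaults to a ones tensor of
# shape (2, 4), i.e. one keep/drop flag per attention head per layer.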
class A__ :
"""simple docstring"""
def __init__( self: Dict , __a: Any , __a: Optional[int]=13 , __a: Tuple=7 , __a: Optional[int]=True , __a: Optional[int]=False , __a: List[str]=99 , __a: str=16 , __a: str=2 , __a: List[str]=4 , __a: Union[str, Any]=4 , __a: Dict="relu" , __a: int=0.1 , __a: str=0.1 , __a: Union[str, Any]=0.0 , __a: Optional[Any]=0.0 , __a: List[Any]=20 , __a: str=2 , __a: int=1 , __a: Optional[Any]=0 , )-> Optional[int]:
lowerCamelCase : Any = parent
lowerCamelCase : Any = batch_size
lowerCamelCase : List[str] = seq_length
lowerCamelCase : Optional[int] = is_training
lowerCamelCase : str = use_labels
lowerCamelCase : Tuple = vocab_size
lowerCamelCase : Optional[Any] = hidden_size
lowerCamelCase : Tuple = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : str = hidden_act
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : int = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : Dict = decoder_layerdrop
lowerCamelCase : List[Any] = max_position_embeddings
lowerCamelCase : List[str] = eos_token_id
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : Tuple = bos_token_id
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id # Eos Token
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
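        # Illustrative reading of the comment above (pad_token_id is 1 in this tester):
        # an input like [[7, 9, 1, 1]] would get position_ids [[2, 3, 2, 2]], with both
        # trailing pads pinned at 2 while the real tokens count up from 2.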
lowerCamelCase : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase : str = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase : List[str] = self.get_config()
        lowerCamelCase : Tuple = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def a__ ( self: str )-> int:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def a__ ( self: List[str] )-> Optional[Any]:
lowerCamelCase , lowerCamelCase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self: Any , __a: Optional[int] , __a: List[Any] )-> List[str]:
lowerCamelCase : Dict = MaMaaaModel(config=__a ).get_decoder().to(__a ).eval()
lowerCamelCase : Any = inputs_dict["""input_ids"""]
lowerCamelCase : Union[str, Any] = inputs_dict["""attention_mask"""]
lowerCamelCase : Dict = inputs_dict["""head_mask"""]
# first forward pass
lowerCamelCase : Dict = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
lowerCamelCase , lowerCamelCase : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase : Tuple = model(__a , attention_mask=__a )["""last_hidden_state"""]
lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , past_key_values=__a )[
"""last_hidden_state"""
]
# select random slice
lowerCamelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-2 ) )
def a__ ( self: Union[str, Any] , __a: str , __a: Dict )-> Any:
lowerCamelCase : Union[str, Any] = MaMaaaModel(config=__a ).to(__a ).eval()
lowerCamelCase : Any = model(**__a )
lowerCamelCase : Any = outputs.encoder_last_hidden_state
lowerCamelCase : Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : List[Any] = model.get_encoder()
encoder.save_pretrained(__a )
lowerCamelCase : Union[str, Any] = MaMaaaEncoder.from_pretrained(__a ).to(__a )
lowerCamelCase : List[Any] = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : Dict = model.get_decoder()
decoder.save_pretrained(__a )
lowerCamelCase : Dict = MaMaaaDecoder.from_pretrained(__a ).to(__a )
lowerCamelCase : int = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__a , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(MaMaaaForConditionalGeneration,) if is_torch_available() else ()
snake_case__ : int =(
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
snake_case__ : List[Any] =True
snake_case__ : Optional[Any] =True
snake_case__ : Dict =False
snake_case__ : Optional[Any] =False
def a__ ( self: Tuple , __a: Dict , __a: List[Any] , __a: Union[str, Any] , __a: Any , __a: Optional[Any] )-> Dict:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def a__ ( self: Any )-> List[str]:
lowerCamelCase : Any = MaMaaaModelTester(self )
        lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=MaMaaaConfig )
def a__ ( self: str )-> str:
self.config_tester.run_common_tests()
def a__ ( self: Optional[int] )-> Any:
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[Any] = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                lowerCamelCase , lowerCamelCase : str = model_class.from_pretrained(tmpdirname , output_loading_info=True )
self.assertEqual(info["""missing_keys"""] , [] )
def a__ ( self: Dict )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a )
def a__ ( self: Union[str, Any] )-> str:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCamelCase : Tuple = model_class(__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = copy.deepcopy(self._prepare_for_class(__a , __a ) )
if not self.is_encoder_decoder:
lowerCamelCase : str = inputs["""input_ids"""]
del inputs["input_ids"]
else:
lowerCamelCase : List[Any] = inputs["""input_ids"""]
lowerCamelCase : int = inputs.get("""decoder_input_ids""" , __a )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , __a )
lowerCamelCase : Dict = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCamelCase : Any = wte(__a )
else:
lowerCamelCase : Optional[int] = wte(__a )
lowerCamelCase : Tuple = wte(__a )
with torch.no_grad():
model(**__a )[0]
def a__ ( self: List[str] )-> List[str]:
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : List[str] = input_dict["""input_ids"""]
        lowerCamelCase : int = input_ids.ne(1 ).to(torch_device )
        lowerCamelCase : Any = MaMaaaForConditionalGeneration(config ).eval().to(torch_device )
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask )
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
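# e.g. _long_tensor([[2, 128_028, 98]]) -> a (1, 3) torch.long tensor already placed on
# torch_device, as used by the integration tests below.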
__lowerCamelCase :List[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Any )-> str:
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def a__ ( self: Optional[Any] )-> int:
        lowerCamelCase : Dict = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        lowerCamelCase : List[Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase : int = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase : Any = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            lowerCamelCase : Optional[Any] = model(**inputs_dict )[0]
        lowerCamelCase : List[Any] = torch.Size((1, 11, 1_024) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        lowerCamelCase : int = torch.tensor(
            [[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
def a__ ( self: Optional[Any] )-> Optional[Any]:
        lowerCamelCase : Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        # change to intended input
        lowerCamelCase : Union[str, Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase : Optional[Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase : Any = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            lowerCamelCase : str = model(**inputs_dict )[0]
        lowerCamelCase : Dict = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        lowerCamelCase : str = torch.tensor(
            [[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
def a__ ( self: str )-> Optional[int]:
        lowerCamelCase : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
lowerCamelCase : Any = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
lowerCamelCase : List[str] = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
        lowerCamelCase : int = tokenizer(src_fr , padding=True , return_tensors="""pt""" )
        lowerCamelCase : str = model.generate(
            input_ids=dct["""input_ids"""].to(torch_device ) , attention_mask=dct["""attention_mask"""].to(torch_device ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
lowerCamelCase : Tuple = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
        lowerCamelCase : Optional[int] = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=True , skip_special_tokens=True )
assert generated == expected_en
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : int ='''trocr'''
snake_case__ : int =['''past_key_values''']
snake_case__ : Optional[Any] ={
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self: Union[str, Any] , __a: Dict=50_265 , __a: str=1_024 , __a: Optional[Any]=12 , __a: List[str]=16 , __a: List[str]=4_096 , __a: int="gelu" , __a: List[str]=512 , __a: Union[str, Any]=0.1 , __a: Optional[int]=0.0 , __a: Tuple=0.0 , __a: Tuple=2 , __a: List[Any]=0.02 , __a: Tuple=0.0 , __a: str=True , __a: Optional[Any]=False , __a: str=True , __a: Optional[int]=True , __a: Union[str, Any]=1 , __a: Optional[int]=0 , __a: Any=2 , **__a: Any , )-> Any:
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : int = d_model
lowerCamelCase : Tuple = decoder_layers
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Tuple = decoder_ffn_dim
lowerCamelCase : List[str] = activation_function
lowerCamelCase : List[str] = max_position_embeddings
lowerCamelCase : Optional[Any] = dropout
lowerCamelCase : Dict = attention_dropout
lowerCamelCase : Dict = activation_dropout
lowerCamelCase : Any = init_std
lowerCamelCase : List[Any] = decoder_layerdrop
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Tuple = scale_embedding
lowerCamelCase : int = use_learned_position_embeddings
lowerCamelCase : str = layernorm_embedding
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
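# A minimal usage sketch (assuming this class mirrors transformers' TrOCRConfig, which
# shares the "trocr" model_type and the attribute_map above):
#
#   from transformers import TrOCRConfig
#   config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
#   assert config.hidden_size == 256  # resolved through attribute_map to d_model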
| 42
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
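        # e.g. with this tester's defaults (image_size=32, patch_size=2, depths=[1, 2, 1],
        # embed_dim=16): expected_seq_len = (32 // 2) ** 2 // 4 ** 2 = 16 and
        # expected_dim = int(16 * 2 ** 2) = 64.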
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
        lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : List[str] = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(torch_device )
        lowerCamelCase : Any = self.default_image_processor
        lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        lowerCamelCase : int = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            lowerCamelCase : Any = model(**inputs )
        # verify the logits
        lowerCamelCase : Tuple = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 1
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs) -> None:
    # Serialize prints across processes by taking an exclusive lock on this script file.
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
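# e.g. printflock(f"{gpu} is OK") below: without the file lock, concurrent ranks writing
# to the same terminal tend to interleave their output mid-line.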
__lowerCamelCase :Union[str, Any] = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__lowerCamelCase :Tuple = torch.device('cuda', local_rank)
__lowerCamelCase :int = socket.gethostname()
__lowerCamelCase :Tuple = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__lowerCamelCase :Union[str, Any] = dist.get_rank()
__lowerCamelCase :str = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 42
|
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
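# Window sketch for the scans above: each product covers 4 adjacent cells, e.g. the
# "right" scan starts at l[0][0] * l[0][1] * l[0][2] * l[0][3]; range(17) stops at
# index 16, the last position where a 4-cell window still fits inside the 20x20 grid.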
if __name__ == "__main__":
print(solution())
| 42
| 1
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
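# Shape sketch: a 600x481 RGB PIL image becomes 576x480 (each side rounded down to a
# multiple of 32), then a float32 tensor of shape (1, 3, 480, 576) scaled into [-1.0, 1.0].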
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: Tuple , __a: VQModel , __a: UNetaDModel , __a: Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , )-> Optional[int]:
super().__init__()
self.register_modules(vqvae=__a , unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self: Optional[Any] , __a: Union[torch.Tensor, PIL.Image.Image] = None , __a: Optional[int] = 1 , __a: Optional[int] = 100 , __a: Optional[float] = 0.0 , __a: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a: Optional[str] = "pil" , __a: bool = True , )-> Union[Tuple, ImagePipelineOutput]:
if isinstance(__a , PIL.Image.Image ):
lowerCamelCase : List[Any] = 1
elif isinstance(__a , torch.Tensor ):
lowerCamelCase : List[Any] = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__a )}' )
if isinstance(__a , PIL.Image.Image ):
lowerCamelCase : List[Any] = preprocess(__a )
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCamelCase : Union[str, Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCamelCase : int = next(self.unet.parameters() ).dtype
lowerCamelCase : List[Any] = randn_tensor(__a , generator=__a , device=self.device , dtype=__a )
lowerCamelCase : int = image.to(device=self.device , dtype=__a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__a , device=self.device )
lowerCamelCase : str = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase : Any = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase : int = {}
if accepts_eta:
lowerCamelCase : Optional[int] = eta
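        # e.g. DDIMScheduler.step exposes an `eta: float = 0.0` parameter, so eta is
        # forwarded to it; schedulers without that parameter simply get an empty kwargs dict.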
for t in self.progress_bar(__a ):
# concat latents and low resolution image in the channel dimension.
lowerCamelCase : List[str] = torch.cat([latents, image] , dim=1 )
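            # e.g. latents (B, 3, H, W) + image (B, 3, H, W) -> (B, 6, H, W), matching the
            # in_channels == 6 split noted where the latents were allocated above.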
lowerCamelCase : int = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
lowerCamelCase : List[Any] = self.unet(__a , __a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : Optional[Any] = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# decode the image latents with the VQVAE
lowerCamelCase : Optional[int] = self.vqvae.decode(__a ).sample
lowerCamelCase : Union[str, Any] = torch.clamp(__a , -1.0 , 1.0 )
lowerCamelCase : Any = image / 2 + 0.5
lowerCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase : Optional[Any] = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
        lowerCamelCase : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        lowerCamelCase : Union[str, Any] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
        lowerCamelCase : List[Any] = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        lowerCamelCase : Any = self.get_dummy_inputs(device )
        lowerCamelCase : Union[str, Any] = pipe(**inputs ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
        lowerCamelCase : Tuple = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[Any] =GPTSanJapaneseTokenizer
snake_case__ : Optional[Any] =False
snake_case__ : Optional[int] ={'''do_clean_text''': False, '''add_prefix_space''': False}
def a__ ( self: List[Any] )-> int:
super().setUp()
# fmt: off
lowerCamelCase : Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCamelCase : Tuple = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCamelCase : Tuple = {"""unk_token""": """<unk>"""}
lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__a ) )
def a__ ( self: List[str] , **__a: Any )-> str:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: List[Any] , __a: List[str] )-> int:
lowerCamelCase : Tuple = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCamelCase : str = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def a__ ( self: Optional[int] , __a: Any )-> str:
lowerCamelCase , lowerCamelCase : Optional[int] = self.get_input_output_texts(__a )
lowerCamelCase : Optional[Any] = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : Tuple = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
return text, ids
def a__ ( self: List[str] )-> Tuple:
pass # TODO add if relevant
def a__ ( self: Dict )-> Any:
pass # TODO add if relevant
def a__ ( self: int )-> Optional[Any]:
pass # TODO add if relevant
def a__ ( self: Optional[Any] )-> str:
lowerCamelCase : Any = self.get_tokenizer()
# Testing tokenization
lowerCamelCase : Dict = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCamelCase : List[str] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCamelCase : str = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
lowerCamelCase : Any = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
lowerCamelCase : Dict = tokens + [tokenizer.unk_token]
lowerCamelCase : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Dict )-> Any:
lowerCamelCase : Any = self.get_tokenizer()
# Testing tokenization
lowerCamelCase : List[Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCamelCase : Union[str, Any] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCamelCase : List[Any] = tokenizer.encode(__a )
lowerCamelCase : int = tokenizer.decode(__a )
self.assertEqual(__a , __a )
@slow
def a__ ( self: int )-> List[Any]:
lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCamelCase : Tuple = """こんにちは、世界。"""
lowerCamelCase : List[str] = """こんばんは、㔺界。😀"""
lowerCamelCase : Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCamelCase : Optional[Any] = tokenizer.encode(prefix_text + input_text )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCamelCase : int = tokenizer.encode(__a , prefix_text=__a )
lowerCamelCase : str = tokenizer.decode(__a )
lowerCamelCase : Any = tokenizer.decode(__a )
lowerCamelCase : str = tokenizer.decode(__a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
@slow
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCamelCase : Dict = """こんにちは、世界。"""
lowerCamelCase : Any = """こんばんは、㔺界。😀"""
lowerCamelCase : List[str] = len(tokenizer.encode(__a ) ) - 2
lowerCamelCase : int = len(tokenizer.encode(__a ) ) - 2
lowerCamelCase : str = [1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase : int = [1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase : Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCamelCase : Optional[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCamelCase : Tuple = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCamelCase : Optional[int] = tokenizer(__a , prefix_text=__a ).token_type_ids
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""あンいワ""" )
lowerCamelCase : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCamelCase : int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
self.assertNotEqual(__a , __a )
self.assertNotEqual(__a , __a )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def a__ ( self: Optional[int] )-> Tuple:
lowerCamelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCamelCase : Optional[int] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCamelCase : int = tokenizer(__a , padding=__a )
lowerCamelCase : List[str] = tokenizer.batch_encode_plus(__a , padding=__a )
# fmt: off
lowerCamelCase : Optional[int] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
lowerCamelCase : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase : Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __a )
self.assertListEqual(x_token.token_type_ids , __a )
self.assertListEqual(x_token.attention_mask , __a )
self.assertListEqual(x_token_a.input_ids , __a )
self.assertListEqual(x_token_a.token_type_ids , __a )
self.assertListEqual(x_token_a.attention_mask , __a )
def a__ ( self: Any )-> Optional[Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def a__ ( self: Tuple )-> Union[str, Any]:
# tokenizer has no padding token
pass
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
config , input_ids , input_mask , sequence_labels , token_labels , choice_labels = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
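# Hedged sketch (mirrors the behaviour the two position-id tests above assert,
# not the library source): non-padding tokens get consecutive positions
# starting at padding_idx + 1, while padding tokens keep padding_idx itself.
import torch

def create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()           # 1 where real token, 0 where pad
    incremental = torch.cumsum(mask, dim=1) * mask   # 1..n over real tokens, 0 on pads
    return incremental.long() + padding_idx          # shift so pads stay at padding_idx

ids = torch.as_tensor([[12, 31, 13, 1]])             # assumes padding_idx == 1, as in the test
assert create_position_ids(ids, padding_idx=1).tolist() == [[2, 3, 4, 1]]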
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
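# Hedged sketch of the special-token layout the sequence-builders test above
# asserts (ALBERT uses BERT-style [CLS] ... [SEP] framing); the ids here are
# placeholders, not the real vocabulary ids.
def build_inputs(cls_id: int, sep_id: int, text: list, pair: list = None) -> list:
    out = [cls_id] + text + [sep_id]
    if pair is not None:
        out = out + pair + [sep_id]
    return out

assert build_inputs(2, 3, [10, 11]) == [2, 10, 11, 3]
assert build_inputs(2, 3, [10, 11], [12]) == [2, 10, 11, 3, 12, 3]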
| 42
| 1
|
"""simple docstring"""
def snake_case ( UpperCamelCase__ : list ) -> list:
if len(UpperCamelCase__ ) <= 1:
return [tuple(UpperCamelCase__ )]
lowerCamelCase : Optional[int] = []
def generate(UpperCamelCase__ : int , UpperCamelCase__ : list ):
lowerCamelCase : List[Any] = [0] * n
res.append(tuple(UpperCamelCase__ ) )
lowerCamelCase : Optional[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
arr[i], arr[0] = arr[0], arr[i]
else:
arr[i], arr[c[i]] = arr[c[i]], arr[i]
res.append(tuple(UpperCamelCase__ ) )
c[i] += 1
lowerCamelCase : Any = 0
else:
lowerCamelCase : Optional[Any] = 0
i += 1
generate(len(UpperCamelCase__ ) , UpperCamelCase__ )
return res
if __name__ == "__main__":
__lowerCamelCase :Optional[int] = input('Enter numbers separated by a comma:\n').strip()
__lowerCamelCase :Optional[Any] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
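# Hedged cross-check (not part of the source): the classic recursive form of
# Heap's algorithm; for small inputs it should emit the same multiset of
# permutations as the iterative version above.
def heaps_recursive(arr: list) -> list:
    res = []

    def generate(k: int, a: list) -> None:
        if k == 1:
            res.append(tuple(a))
            return
        for i in range(k):
            generate(k - 1, a)
            # parity of k decides which element is swapped into the last slot
            if k % 2 == 0:
                a[i], a[k - 1] = a[k - 1], a[i]
            else:
                a[0], a[k - 1] = a[k - 1], a[0]

    generate(len(arr), list(arr))
    return res

assert sorted(heaps_recursive([1, 2, 3])) == sorted(
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
)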
| 42
|
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
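# Hedged standalone sketch of the same Kosaraju idea (the defs above are
# name-obfuscated, so this restates it compactly): (1) DFS post-order on G,
# (2) DFS on the reversed graph in reverse post-order; each second-pass tree
# is one strongly connected component. Checked against test_graph_2 above.
def kosaraju(graph: dict) -> list:
    visited, order = set(), []

    def dfs(v):
        visited.add(v)
        for w in graph[v]:
            if w not in visited:
                dfs(w)
        order.append(v)

    for v in graph:
        if v not in visited:
            dfs(v)
    rev = {v: [] for v in graph}
    for v, ws in graph.items():
        for w in ws:
            rev[w].append(v)
    visited, comps = set(), []

    def dfs_rev(v, comp):
        visited.add(v)
        comp.append(v)
        for w in rev[v]:
            if w not in visited:
                dfs_rev(w, comp)

    for v in reversed(order):
        if v not in visited:
            comp = []
            dfs_rev(v, comp)
            comps.append(comp)
    return comps

assert sorted(map(sorted, kosaraju({0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}))) == [[0, 1, 2], [3, 4, 5]]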
| 42
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :List[str] = logging.get_logger(__name__)
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] ='''timm_backbone'''
def __init__( self: List[str] , __a: Tuple=None , __a: List[str]=3 , __a: List[str]=True , __a: Any=True , __a: Union[str, Any]=None , **__a: Union[str, Any] , )-> List[Any]:
super().__init__(**__a )
lowerCamelCase : Any = backbone
lowerCamelCase : List[Any] = num_channels
lowerCamelCase : Optional[int] = features_only
lowerCamelCase : Any = use_pretrained_backbone
lowerCamelCase : Tuple = True
lowerCamelCase : Union[str, Any] = out_indices if out_indices is not None else (-1,)
| 42
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
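# Hedged worked example of the feature-count arithmetic in the property above,
# using this config's apparent defaults (input_size=1, lags_sequence of length
# 7, no static categorical features so embedding_dimension stays [0], and the
# real/time feature counts defaulting to 0):
#   _number_of_features = sum(embedding_dimension)   # 0
#                       + num_dynamic_real_features  # 0
#                       + num_time_features          # 0 by default here
#                       + num_static_real_features   # 0
#                       + input_size * 2             # log1p(abs(loc)), log(scale)
#                       = 2
#   so the transformer consumes vectors of size input_size * len(lags_sequence)
#   + _number_of_features = 1 * 7 + 2 = 9 before the d_model projection.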
| 42
| 1
|
"""simple docstring"""
def snake_case ( UpperCamelCase__ : dict ) -> set:
lowerCamelCase : int = set()
# edges = list of graph's edges
lowerCamelCase : List[Any] = get_edges(UpperCamelCase__ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
from_node , to_node = edges.pop()
chosen_vertices.add(from_node )
chosen_vertices.add(to_node )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(edge )
return chosen_vertices
def snake_case ( UpperCamelCase__ : dict ) -> set:
lowerCamelCase : Tuple = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 42
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]:
lowerCamelCase : int = 1
lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Any = int((i / placement) % RADIX )
buckets[tmp].append(UpperCamelCase__ )
# put each buckets' contents into list_of_ints
lowerCamelCase : Dict = 0
for b in range(UpperCamelCase__ ):
for i in buckets[b]:
lowerCamelCase : List[str] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
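# Hedged standalone check of the same LSD radix-sort idea (the def above is
# name-obfuscated, so this restates it minimally for non-negative integers;
# runtime is O(d * (n + radix)) for d-digit inputs):
def radix_sort(xs: list, radix: int = 10) -> list:
    if not xs:
        return []
    out = list(xs)
    place = 1
    while place <= max(out):
        buckets = [[] for _ in range(radix)]
        for x in out:
            buckets[(x // place) % radix].append(x)
        out = [x for b in buckets for x in b]
        place *= radix
    return out

assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]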
| 42
| 1
|
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 42
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weighs
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
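# Hedged note on the weight-layout convention used throughout the conversion
# above: trax stores dense kernels as (in_features, out_features), while
# torch.nn.Linear.weight is (out_features, in_features) -- hence the repeated
# .transpose(0, 1).contiguous() before set_param. Minimal illustration:
import numpy as np
import torch
from torch import nn

trax_kernel = np.random.randn(4, 3).astype("float32")  # (in, out) layout assumed
layer = nn.Linear(4, 3, bias=False)
layer.weight = nn.Parameter(torch.tensor(trax_kernel).transpose(0, 1).contiguous())
assert layer.weight.shape == (3, 4)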
| 42
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : List[str]
snake_case__ : Optional[str] =None
# Automatically constructed
snake_case__ : ClassVar[str] ="dict"
snake_case__ : ClassVar[Any] =None
snake_case__ : str =field(default='''Translation''' , init=__lowercase , repr=__lowercase)
def __call__( self: Union[str, Any] )-> List[str]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def a__ ( self: Optional[int] )-> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : Optional[List] =None
snake_case__ : Optional[int] =None
snake_case__ : Optional[str] =None
# Automatically constructed
snake_case__ : ClassVar[str] ="dict"
snake_case__ : ClassVar[Any] =None
snake_case__ : str =field(default='''TranslationVariableLanguages''' , init=__lowercase , repr=__lowercase)
def a__ ( self: Optional[int] )-> Any:
lowerCamelCase : Tuple = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase : List[Any] = len(self.languages ) if self.languages else None
def __call__( self: Optional[Any] )-> List[Any]:
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def a__ ( self: Optional[int] , __a: List[Any] )-> Any:
lowerCamelCase : str = set(self.languages )
if self.languages and set(__a ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__a ) - lang_set ) )}) are not in valid set ({", ".join(__a )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase : Any = []
for lang, text in translation_dict.items():
if isinstance(__a , __a ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
languages , translations = zip(*sorted(translation_tuples ) )
return {"language": languages, "translation": translations}
def a__ ( self: Tuple )-> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
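# Hedged usage sketch for TranslationVariableLanguages above: its encoding
# helper normalises a {lang: text-or-list-of-texts} dict into two aligned
# lists sorted by language code. Standalone restatement of that rule:
def flatten_translation(d: dict) -> dict:
    pairs = []
    for lang, text in d.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, t) for t in text)
    langs, texts = zip(*sorted(pairs))
    return {"language": list(langs), "translation": list(texts)}

assert flatten_translation({"de": "Hallo", "en": ["Hello", "Hi"]}) == {
    "language": ["de", "en", "en"],
    "translation": ["Hallo", "Hello", "Hi"],
}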
| 42
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
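# Hedged minimal example of the hook mechanism the tests above exercise:
# add_hook_to_module wraps forward so the hook can rewrite inputs (pre_forward)
# and outputs (post_forward). Mirrors the PreForwardHook defined earlier.
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOneToInput(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

layer = nn.Identity()
add_hook_to_module(layer, AddOneToInput())
assert torch.equal(layer(torch.zeros(2)), torch.ones(2))
remove_hook_from_module(layer)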
| 42
| 1
|
"""simple docstring"""
from math import isclose, sqrt
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> tuple[float, float, float]:
lowerCamelCase : List[Any] = point_y / 4 / point_x
lowerCamelCase : Dict = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCamelCase : Tuple = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCamelCase : Optional[int] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCamelCase : Tuple = outgoing_gradient**2 + 4
lowerCamelCase : List[Any] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCamelCase : Optional[int] = (point_y - outgoing_gradient * point_x) ** 2 - 100
lowerCamelCase : Dict = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCamelCase : Union[str, Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCamelCase : Union[str, Any] = x_minus if isclose(UpperCamelCase__ , UpperCamelCase__ ) else x_plus
lowerCamelCase : str = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case ( UpperCamelCase__ : float = 1.4 , UpperCamelCase__ : float = -9.6 ) -> int:
lowerCamelCase : int = 0
lowerCamelCase : float = first_x_coord
lowerCamelCase : float = first_y_coord
lowerCamelCase : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
point_x , point_y , incoming_gradient = next_point(point_x , point_y , incoming_gradient )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class A__ :
"""simple docstring"""
def __init__( self: int , __a: List[str] , __a: Union[str, Any] , __a: Union[str, Any] , __a: Tuple=None , __a: str=None )-> Union[str, Any]:
lowerCamelCase : Any = start
lowerCamelCase : Union[str, Any] = end
lowerCamelCase : List[Any] = val
lowerCamelCase : Optional[int] = (start + end) // 2
lowerCamelCase : Dict = left
lowerCamelCase : str = right
def __repr__( self: List[Any] )-> Optional[Any]:
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class A__ :
"""simple docstring"""
def __init__( self: int , __a: Sequence , __a: Optional[Any] )-> Any:
lowerCamelCase : Optional[int] = collection
lowerCamelCase : str = function
if self.collection:
lowerCamelCase : Optional[Any] = self._build_tree(0 , len(__a ) - 1 )
def a__ ( self: List[Any] , __a: Tuple , __a: Union[str, Any] )-> Any:
self._update_tree(self.root , __a , __a )
def a__ ( self: Any , __a: List[str] , __a: Optional[int] )-> Union[str, Any]:
return self._query_range(self.root , __a , __a )
def a__ ( self: List[Any] , __a: str , __a: Dict )-> Dict:
if start == end:
return SegmentTreeNode(__a , __a , self.collection[start] )
lowerCamelCase : int = (start + end) // 2
lowerCamelCase : Tuple = self._build_tree(__a , __a )
lowerCamelCase : Union[str, Any] = self._build_tree(mid + 1 , __a )
return SegmentTreeNode(__a , __a , self.fn(left.val , right.val ) , __a , __a )
def a__ ( self: Union[str, Any] , __a: Dict , __a: str , __a: Optional[Any] )-> Any:
if node.start == i and node.end == i:
lowerCamelCase : Optional[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , __a , __a )
else:
self._update_tree(node.right , __a , __a )
lowerCamelCase : Optional[int] = self.fn(node.left.val , node.right.val )
def a__ ( self: Tuple , __a: Dict , __a: Optional[Any] , __a: int )-> Optional[int]:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , __a , __a )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , __a , node.mid ) , self._query_range(node.right , node.mid + 1 , __a ) , )
else:
# range in right child tree
return self._query_range(node.right , __a , __a )
def a__ ( self: List[Any] )-> Any:
if self.root is not None:
lowerCamelCase : Union[str, Any] = Queue()
queue.put(self.root )
while not queue.empty():
lowerCamelCase : List[Any] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
__lowerCamelCase :int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
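# Hedged usage sketch: assuming the segment tree class above is exported as
# `SegmentTree` (the demo above already assumes that name), any associative
# binary function works as the merge, e.g. string concatenation.
concat_tree = SegmentTree(["a", "b", "c"], lambda left, right: left + right)
assert concat_tree.query_range(0, 2) == "abc"  # inclusive range query
concat_tree.update(1, "B")
assert concat_tree.query_range(0, 2) == "aBc"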
| 42
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
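# Hedged quick-start sketch for the backbone behaviour exercised above; the
# config values are transformers defaults and the calls are the public API
# already imported in this test file.
import torch
from transformers import ConvNextBackbone, ConvNextConfig

backbone_config = ConvNextConfig(out_features=["stage2", "stage3", "stage4"])
backbone = ConvNextBackbone(backbone_config).eval()
with torch.no_grad():
    feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
print([tuple(fm.shape) for fm in feature_maps])  # one map per requested stage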
| 42
| 1
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase):
"""simple docstring"""
@parameterized.expand([(None,), ("""foo.json""",)] )
def a__ ( self: Union[str, Any] , __a: Dict )-> List[str]:
lowerCamelCase : int = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a , config_name=__a )
lowerCamelCase : Union[str, Any] = GenerationConfig.from_pretrained(__a , config_name=__a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : List[str] = AutoConfig.from_pretrained("""gpt2""" )
lowerCamelCase : Optional[int] = GenerationConfig.from_model_config(__a )
lowerCamelCase : Dict = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__a , __a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = GenerationConfig()
lowerCamelCase : Optional[int] = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
lowerCamelCase : Any = copy.deepcopy(__a )
lowerCamelCase : List[str] = generation_config.update(**__a )
# update_kwargs was not modified (no side effects)
self.assertEqual(__a , __a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__a , {"""foo""": """bar"""} )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Tuple = GenerationConfig()
lowerCamelCase : Any = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(__a )
lowerCamelCase : List[str] = GenerationConfig.from_pretrained(__a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
lowerCamelCase : Dict = GenerationConfig.from_model_config(__a )
assert not hasattr(__a , """foo""" ) # no new kwargs should be initialized if from config
def a__ ( self: str )-> List[Any]:
lowerCamelCase : Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __a )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase : int = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a )
lowerCamelCase : Dict = GenerationConfig.from_pretrained(__a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> int:
lowerCamelCase : Dict = TOKEN
HfFolder.save_token(__a )
@classmethod
def a__ ( cls: List[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def a__ ( self: int )-> Any:
lowerCamelCase : int = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowerCamelCase : Union[str, Any] = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id="""test-generation-config""" , push_to_hub=__a , use_auth_token=self._token )
lowerCamelCase : str = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def a__ ( self: List[Any] )-> Any:
lowerCamelCase : Any = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowerCamelCase : Any = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=__a , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
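# Hedged sketch of the typical end use of a GenerationConfig (standard
# transformers API; `model` and `inputs` are assumed to exist):
#
#     gen_config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=32)
#     outputs = model.generate(**inputs, generation_config=gen_config)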
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
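# Hedged usage sketch, assuming the class above is exported as `RealmConfig`
# as in transformers; the override value and the model class are illustrative:
#
#     config = RealmConfig(searcher_beam_size=10)
#     model = RealmEmbedder(config)  # RealmEmbedder name assumed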
| 42
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowerCamelCase :Tuple = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : tuple , UpperCamelCase__ : Path , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=False , ) -> Union[str, Any]:
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : bool = False ) -> Dict:
lowerCamelCase : Tuple = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
lowerCamelCase : str = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
lowerCamelCase : Dict = """cpu"""
lowerCamelCase : Optional[Any] = Path(UpperCamelCase__ )
# VAE DECODER
lowerCamelCase : List[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" )
lowerCamelCase : Optional[int] = vae_decoder.config.latent_channels
# forward only through the decoder part
lowerCamelCase : Dict = vae_decoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , 25 , 25 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCamelCase__ , )
del vae_decoder
if __name__ == "__main__":
__lowerCamelCase :List[str] = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__lowerCamelCase :int = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX')
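# Hedged invocation sketch for the script above (the file name and paths are
# placeholders):
#
#     python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 \
#         --output_path ./sd_onnx --opset 14 --fp16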
| 42
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
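# Hedged usage sketch, assuming the class above is exported as `GLPNConfig`
# as in transformers; the override and the model class name are illustrative:
#
#     config = GLPNConfig(max_depth=80)
#     model = GLPNForDepthEstimation(config)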
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Any , __a: float )-> float:
return 0.0
def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int ) -> tuple[int | float, int | float]:
lowerCamelCase : str = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowerCamelCase : str = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def snake_case ( UpperCamelCase__ : FilterType , UpperCamelCase__ : int ) -> None:
lowerCamelCase : Optional[int] = 512
lowerCamelCase : List[str] = [1] + [0] * (size - 1)
lowerCamelCase : List[Any] = [filter_type.process(item ) for item in inputs]
lowerCamelCase : str = [0] * (samplerate - size) # zero-padding
outputs += filler
lowerCamelCase : List[Any] = np.abs(np.fft.fft(UpperCamelCase__ ) )
lowerCamelCase : Tuple = 20 * np.logaa(UpperCamelCase__ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
lowerCamelCase : Union[str, Any] = get_bounds(UpperCamelCase__ , UpperCamelCase__ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(UpperCamelCase__ )
plt.show()
def snake_case ( UpperCamelCase__ : FilterType , UpperCamelCase__ : int ) -> None:
lowerCamelCase : str = 512
lowerCamelCase : str = [1] + [0] * (size - 1)
lowerCamelCase : Optional[int] = [filter_type.process(item ) for item in inputs]
lowerCamelCase : List[str] = [0] * (samplerate - size) # zero-padding
outputs += filler
lowerCamelCase : Tuple = np.angle(np.fft.fft(UpperCamelCase__ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(UpperCamelCase__ , -2 * pi ) )
plt.show()
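# Hedged sketch of a filter satisfying the `process(sample) -> float` protocol
# the plot helpers above expect: a one-pole low-pass IIR with an illustrative
# smoothing coefficient.
class OnePoleLowpass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = y[n-1] + alpha * (x[n] - y[n-1])
        self.prev += self.alpha * (sample - self.prev)
        return self.prev

# Passing OnePoleLowpass() and a samplerate such as 48000 to the gain and
# phase helpers above would plot its frequency response.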
| 42
|
"""simple docstring"""
from __future__ import annotations
import math
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float:
lowerCamelCase : Dict = u
for i in range(1 , UpperCamelCase__ ):
lowerCamelCase : List[str] = temp * (u - i)
return temp
def snake_case ( ) -> None:
lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) )
lowerCamelCase : list[list[float]] = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = 0
print("""enter the values of parameters in a list: """ )
lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(UpperCamelCase__ ):
lowerCamelCase : int = float(input() )
lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) )
lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1]
lowerCamelCase : Any = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
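# Hedged non-interactive sketch of the same forward-difference algorithm on
# fixed data (names and values are illustrative). For xs=[1, 2, 3, 4] and
# ys=[1, 8, 27, 64] (x**3), interpolating at 2.5 gives exactly 15.625.
from math import factorial

def forward_interpolate(xs, ys, value):
    n = len(xs)
    table = [[0.0] * n for _ in range(n)]
    for i, y0 in enumerate(ys):
        table[i][0] = y0
    # Forward-difference table: column j holds the j-th differences.
    for j in range(1, n):
        for i in range(n - j):
            table[i][j] = table[i + 1][j - 1] - table[i][j - 1]
    u = (value - xs[0]) / (xs[1] - xs[0])
    total, u_term = table[0][0], 1.0
    for j in range(1, n):
        u_term *= u - (j - 1)
        total += u_term * table[0][j] / factorial(j)
    return total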
| 42
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
lowerCamelCase : Optional[Any] = """The dog is cute and lives in the garden house"""
lowerCamelCase : Optional[Any] = jnp.array([tokenizer.encode(__a )] )
lowerCamelCase : Tuple = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
lowerCamelCase : List[str] = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
lowerCamelCase : List[Any] = model(__a )["""last_hidden_state"""]
self.assertEqual(output.shape , __a )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , __a , atol=1e-3 ) )
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase :str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase :Optional[Any] = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__lowerCamelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
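# Hedged invocation sketch for the conversion script above (the file name is a
# placeholder; --model_name must be one of the names_to_config keys):
#
#     python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub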
| 42
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase :Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : int=8 ) -> List[str]:
lowerCamelCase : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCamelCase : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
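# Worked example: with scale_factor=8, a height of 768 gives 768 // 64 = 12
# blocks (no remainder), returned as 12 * 8 = 96; a height of 770 rounds up
# to 13 blocks, i.e. 104.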
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: Union[str, Any] , __a: UNetaDConditionModel , __a: DDPMScheduler , __a: VQModel , )-> Optional[Any]:
super().__init__()
self.register_modules(
unet=__a , scheduler=__a , movq=__a , )
lowerCamelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self: Optional[Any] , __a: Optional[int] , __a: Any , __a: List[Any] , __a: str , __a: List[str] , __a: Dict )-> Dict:
if latents is None:
lowerCamelCase : Optional[Any] = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase : int = latents.to(__a )
lowerCamelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def a__ ( self: Optional[int] , __a: Optional[int]=0 )-> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCamelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase : List[str] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
def a__ ( self: List[Any] , __a: List[Any]=0 )-> List[str]:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCamelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase , lowerCamelCase : Any = cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
# We'll offload the last model manually.
lowerCamelCase : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__a )
def __call__( self: List[str] , __a: Union[torch.FloatTensor, List[torch.FloatTensor]] , __a: Union[torch.FloatTensor, List[torch.FloatTensor]] , __a: int = 512 , __a: int = 512 , __a: int = 100 , __a: float = 4.0 , __a: int = 1 , __a: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a: Optional[torch.FloatTensor] = None , __a: Optional[str] = "pil" , __a: bool = True , )-> Optional[int]:
lowerCamelCase : int = self._execution_device
lowerCamelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(__a , __a ):
lowerCamelCase : Optional[Any] = torch.cat(__a , dim=0 )
lowerCamelCase : Tuple = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__a , __a ):
lowerCamelCase : Union[str, Any] = torch.cat(__a , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase : List[Any] = image_embeds.repeat_interleave(__a , dim=0 )
lowerCamelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(__a , dim=0 )
lowerCamelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__a )
self.scheduler.set_timesteps(__a , device=__a )
lowerCamelCase : List[str] = self.scheduler.timesteps
lowerCamelCase : Tuple = self.unet.config.in_channels
lowerCamelCase , lowerCamelCase : Dict = downscale_height_and_width(__a , __a , self.movq_scale_factor )
# create initial latent
lowerCamelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __a , __a , __a , self.scheduler , )
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase : Tuple = {"""image_embeds""": image_embeds}
lowerCamelCase : Optional[int] = self.unet(
sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase , lowerCamelCase : List[str] = noise_pred.chunk(2 )
lowerCamelCase , lowerCamelCase : Any = variance_pred.chunk(2 )
lowerCamelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase , lowerCamelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : Any = self.scheduler.step(
__a , __a , __a , generator=__a , )[0]
# post-processing
lowerCamelCase : str = self.movq.decode(__a , force_not_quantize=__a )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowerCamelCase : str = image * 0.5 + 0.5
lowerCamelCase : List[str] = image.clamp(0 , 1 )
lowerCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase : Dict = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
| 42
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
snake_case__ : Tuple =10
def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 42
| 1
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
def a__ ( self: str )-> str:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
self.assertRaisesRegex(__a , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
self.assertRaisesRegex(__a , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
self.assertRaisesRegex(
__a , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
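# Hedged sketch of the greedy fractional-knapsack routine exercised above:
# sort by profit/weight ratio, take whole items greedily, then a fraction of
# the first item that no longer fits. On the first test's data all ratios are
# 5 and the total weight is 42 <= 100, so everything fits and the profit is 210.
def calc_profit_sketch(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if capacity >= w:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w
            break
    return total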
| 42
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase : int = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__lowerCamelCase :Optional[Any] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__lowerCamelCase :List[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : Dict=100 , UpperCamelCase__ : int=" " ) -> List[str]:
lowerCamelCase : int = text.split(UpperCamelCase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ )]
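# A quick sketch of the splitter's behavior (hypothetical input, not from the
# original script): split_text("a b c d e", 2) -> ["a b", "c d", "e"], i.e.
# passages of at most n space-joined words, stripped of surrounding whitespace.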
def snake_case ( UpperCamelCase__ : dict ) -> dict:
lowerCamelCase , lowerCamelCase : Optional[Any] = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(UpperCamelCase__ ):
titles.append(title if title is not None else """""" )
texts.append(UpperCamelCase__ )
return {"title": titles, "text": texts}
def snake_case ( UpperCamelCase__ : dict , UpperCamelCase__ : DPRContextEncoder , UpperCamelCase__ : DPRContextEncoderTokenizerFast ) -> dict:
lowerCamelCase : Dict = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=UpperCamelCase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
lowerCamelCase : Dict = ctx_encoder(input_ids.to(device=UpperCamelCase__ ) , return_dict=UpperCamelCase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def snake_case ( UpperCamelCase__ : "RagExampleArguments" , UpperCamelCase__ : "ProcessingArguments" , UpperCamelCase__ : "IndexHnswArguments" , ) -> str:
######################################
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCamelCase : Union[str, Any] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCamelCase : List[Any] = dataset.map(UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCamelCase : Any = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCamelCase__ )
lowerCamelCase : List[str] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCamelCase : List[str] = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
lowerCamelCase : List[Any] = dataset.map(
partial(UpperCamelCase__ , ctx_encoder=UpperCamelCase__ , ctx_tokenizer=UpperCamelCase__ ) , batched=UpperCamelCase__ , batch_size=processing_args.batch_size , features=UpperCamelCase__ , )
# And finally save your dataset
lowerCamelCase : Tuple = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(UpperCamelCase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCamelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=UpperCamelCase__ )
# And save the index
lowerCamelCase : int = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(UpperCamelCase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : str =field(
default=str(Path(__lowercase).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''') , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
snake_case__ : str =field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
snake_case__ : str =field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
snake_case__ : Optional[str] =field(
default=str(Path(__lowercase).parent / '''test_run''' / '''dummy-kb''') , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : Optional[int] =field(
default=__lowercase , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
snake_case__ : int =field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : int =field(
default=7_68 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
snake_case__ : int =field(
default=1_28 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__lowerCamelCase :List[str] = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase :Union[str, Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__lowerCamelCase :str = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AudioLDMPipeline
snake_case__ : int =TEXT_TO_AUDIO_PARAMS
snake_case__ : Dict =TEXT_TO_AUDIO_BATCH_PARAMS
snake_case__ : Optional[Any] =frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
def a__ ( self: List[Any] )-> Optional[int]:
torch.manual_seed(0 )
lowerCamelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , )
lowerCamelCase : List[str] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
lowerCamelCase : Union[str, Any] = ClapTextModelWithProjection(__a )
lowerCamelCase : List[Any] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
lowerCamelCase : List[Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , )
lowerCamelCase : Optional[Any] = SpeechTaHifiGan(__a )
lowerCamelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a__ ( self: Any , __a: Optional[Any] , __a: List[str]=0 )-> Union[str, Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : List[str] = torch.manual_seed(__a )
else:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a__ ( self: List[Any] )-> Optional[Any]:
lowerCamelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Optional[int] = self.get_dummy_components()
lowerCamelCase : List[str] = AudioLDMPipeline(**__a )
lowerCamelCase : Tuple = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = audioldm_pipe(**__a )
lowerCamelCase : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 256
lowerCamelCase : str = audio[:10]
lowerCamelCase : Any = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self: Tuple )-> Dict:
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : int = AudioLDMPipeline(**__a )
lowerCamelCase : Tuple = audioldm_pipe.to(__a )
lowerCamelCase : int = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : int = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
lowerCamelCase : Union[str, Any] = audioldm_pipe(**__a )
lowerCamelCase : List[Any] = output.audios[0]
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__a )
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase : Dict = audioldm_pipe.tokenizer(
__a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="""pt""" , )
lowerCamelCase : List[Any] = text_inputs["""input_ids"""].to(__a )
lowerCamelCase : List[str] = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase : Any = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase : List[str] = F.normalize(__a , dim=-1 )
lowerCamelCase : str = prompt_embeds
# forward
lowerCamelCase : Dict = audioldm_pipe(**__a )
lowerCamelCase : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : List[Any] = AudioLDMPipeline(**__a )
lowerCamelCase : List[Any] = audioldm_pipe.to(__a )
lowerCamelCase : Tuple = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : int = negative_prompt
lowerCamelCase : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
lowerCamelCase : Any = audioldm_pipe(**__a )
lowerCamelCase : Optional[Any] = output.audios[0]
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase : int = []
for p in [prompt, negative_prompt]:
lowerCamelCase : Any = audioldm_pipe.tokenizer(
__a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="""pt""" , )
lowerCamelCase : Dict = text_inputs["""input_ids"""].to(__a )
lowerCamelCase : List[Any] = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase : Union[str, Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase : str = F.normalize(__a , dim=-1 )
embeds.append(__a )
lowerCamelCase , lowerCamelCase : Union[str, Any] = embeds
# forward
lowerCamelCase : List[str] = audioldm_pipe(**__a )
lowerCamelCase : List[str] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self: Optional[Any] )-> List[str]:
lowerCamelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Dict = self.get_dummy_components()
lowerCamelCase : List[Any] = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase : Optional[Any] = AudioLDMPipeline(**__a )
lowerCamelCase : int = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : List[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Any = """egg cracking"""
lowerCamelCase : str = audioldm_pipe(**__a , negative_prompt=__a )
lowerCamelCase : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 256
lowerCamelCase : int = audio[:10]
lowerCamelCase : List[Any] = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Dict = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase : Any = AudioLDMPipeline(**__a )
lowerCamelCase : List[Any] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
lowerCamelCase : Dict = audioldm_pipe(__a , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCamelCase : List[Any] = 2
lowerCamelCase : Tuple = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowerCamelCase : Optional[int] = 2
lowerCamelCase : Dict = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowerCamelCase : str = 2
lowerCamelCase : Union[str, Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a__ ( self: Optional[Any] )-> int:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : str = AudioLDMPipeline(**__a )
lowerCamelCase : Any = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : List[Any] = audioldm_pipe.vocoder.config.sampling_rate
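        # sanity note: the dummy vocoder is configured at 16_000 Hz, so the two
        # requested durations below correspond to 0.016 s * 16_000 = 256 samples
        # and 0.032 s * 16_000 = 512 samples respectively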
lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(__a )
lowerCamelCase : str = audioldm_pipe(audio_length_in_s=0.0_16 , **__a )
lowerCamelCase : int = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.0_16
lowerCamelCase : int = audioldm_pipe(audio_length_in_s=0.0_32 , **__a )
lowerCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.0_32
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : int = AudioLDMPipeline(**__a )
lowerCamelCase : List[str] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : int = ["""hey"""]
lowerCamelCase : str = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase : Optional[Any] = output.audios.shape
assert audio_shape == (1, 256)
lowerCamelCase : Optional[int] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCamelCase : Optional[Any] = SpeechTaHifiGan(__a ).to(__a )
lowerCamelCase : Dict = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a__ ( self: Optional[Any] )-> int:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a )
def a__ ( self: List[Any] )-> str:
self._test_inference_batch_single_identical(test_mean_pixel_difference=__a )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a__ ( self: Dict )-> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a )
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[Any] )-> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: List[Any] , __a: str , __a: List[str]="cpu" , __a: Optional[int]=torch.floataa , __a: Optional[Any]=0 )-> List[Any]:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Optional[Any] = np.random.RandomState(__a ).standard_normal((1, 8, 128, 16) )
lowerCamelCase : str = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : Optional[int] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCamelCase : Optional[Any] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : int = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = 25
lowerCamelCase : Tuple = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 81_920
lowerCamelCase : Any = audio[77_230:77_240]
lowerCamelCase : Any = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
lowerCamelCase : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCamelCase : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCamelCase : Optional[int] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Tuple = self.get_inputs(__a )
lowerCamelCase : int = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 81_920
lowerCamelCase : List[str] = audio[27_780:27_790]
lowerCamelCase : Tuple = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
lowerCamelCase : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
def a__ ( self: str )-> str:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
        self.assertRaisesRegex(ValueError , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
        self.assertRaisesRegex(ValueError , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
        self.assertRaisesRegex(
            ValueError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__lowerCamelCase :Optional[Any] = '\nimport os\n'
__lowerCamelCase :List[str] = '\ndef foo():\n import os\n return False\n'
__lowerCamelCase :List[str] = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
__lowerCamelCase :Dict = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
__lowerCamelCase :List[str] = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
__lowerCamelCase :int = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
__lowerCamelCase :Optional[Any] = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
__lowerCamelCase :Optional[int] = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
__lowerCamelCase :Any = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
__lowerCamelCase :int = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
__lowerCamelCase :Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
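# Every case above imports `os` outside of any try/except ImportError guard,
# while `bar`/`baz` appear only inside such guards; the parametrized test below
# therefore expects get_imports to report exactly ["os"], treating imports
# guarded by try/except ImportError as optional dependencies.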
@pytest.mark.parametrize("""case""" , UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ) -> Tuple:
lowerCamelCase : Dict = os.path.join(UpperCamelCase__ , """test_file.py""" )
with open(UpperCamelCase__ , """w""" ) as _tmp_file:
_tmp_file.write(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = get_imports(UpperCamelCase__ )
assert parsed_imports == ["os"]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
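# The try/except blocks below only extend the import structure when the
# optional vision / torch dependencies are available, so importing this
# package does not fail on a minimal install; the TYPE_CHECKING branch
# further down mirrors the same pattern for static type checkers.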
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =ProphetNetTokenizer
snake_case__ : int =False
def a__ ( self: str )-> Tuple:
super().setUp()
lowerCamelCase : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def a__ ( self: List[Any] , __a: List[str] )-> Any:
lowerCamelCase : Optional[Any] = """UNwant\u00E9d,running"""
lowerCamelCase : int = """unwanted, running"""
return input_text, output_text
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Any = self.tokenizer_class(self.vocab_file )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def a__ ( self: Any )-> Optional[Any]:
lowerCamelCase : Tuple = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def a__ ( self: str )-> List[Any]:
lowerCamelCase : Any = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def a__ ( self: Optional[int] )-> Dict:
lowerCamelCase : List[str] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def a__ ( self: Tuple )-> int:
lowerCamelCase : List[Any] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def a__ ( self: Union[str, Any] )-> List[Any]:
lowerCamelCase : Any = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def a__ ( self: int )-> int:
lowerCamelCase : List[str] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : int = BasicTokenizer(do_lower_case=__a , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def a__ ( self: Dict )-> Tuple:
lowerCamelCase : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowerCamelCase : str = {}
for i, token in enumerate(__a ):
lowerCamelCase : Optional[int] = i
lowerCamelCase : Tuple = WordpieceTokenizer(vocab=__a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def a__ ( self: Dict )-> Any:
lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
lowerCamelCase : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase : Optional[Any] = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
lowerCamelCase : List[str] = tokenizer(__a , padding=__a , return_tensors="""pt""" )
self.assertIsInstance(__a , __a )
lowerCamelCase : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self: str )-> int:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def a__ ( self: List[Any] )-> Optional[Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def a__ ( self: int )-> Optional[int]:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def a__ ( self: int )-> Any:
lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
lowerCamelCase : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
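        # worked example with the defaults above: ((32 // 2) ** 2) // 4 ** 2 = 16
        # tokens in the final stage, each of dimension 16 * 2 ** 2 = 64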
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def snake_case ( UpperCamelCase__ : Any ) -> Optional[Any]:
return EnvironmentCommand()
class A__ ( __lowercase):
"""simple docstring"""
@staticmethod
def a__ ( __a: ArgumentParser )-> Optional[Any]:
lowerCamelCase : Optional[int] = parser.add_parser("""env""" )
download_parser.set_defaults(func=__a )
def a__ ( self: List[Any] )-> Any:
lowerCamelCase : Any = huggingface_hub.__version__
lowerCamelCase : str = """not installed"""
lowerCamelCase : int = """NA"""
if is_torch_available():
import torch
lowerCamelCase : Optional[int] = torch.__version__
lowerCamelCase : int = torch.cuda.is_available()
lowerCamelCase : Any = """not installed"""
if is_transformers_available():
import transformers
lowerCamelCase : Any = transformers.__version__
lowerCamelCase : Optional[int] = """not installed"""
if is_accelerate_available():
import accelerate
lowerCamelCase : Union[str, Any] = accelerate.__version__
lowerCamelCase : int = """not installed"""
if is_xformers_available():
import xformers
lowerCamelCase : Dict = xformers.__version__
lowerCamelCase : Dict = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(__a ) )
return info
@staticmethod
def a__ ( __a: int )-> Optional[Any]:
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 42
|
"""simple docstring"""
import os
def snake_case ( ) -> int:
with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(x ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
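# Worked example of the horizontal scan above (illustrative row, not from the
# puzzle grid): for [2, 3, 4, 5, 6] the four-term windows are 2*3*4*5 = 120 and
# 3*4*5*6 = 360, so the running maximum becomes 360. The vertical and diagonal
# passes apply the same product with shifted indices, which is why their loop
# ranges stop at 17 (= 20 - 3).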
| 42
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove the folder if it already exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameters should be reordered according to their respective places in the function signature:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
# Should keep exactly one arg (everything before the first missing one, "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def a__ ( self: int )-> Optional[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def a__ ( self: Union[str, Any] )-> Optional[int]:
# NOTE: larger batch sizes cause this test to time out, so only smaller batches are tested
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self: Tuple )-> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def a__ ( self: Dict )-> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def a__ ( self: Optional[int] )-> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def a__ ( self: Any )-> Tuple:
super().test_save_load_local(expected_max_difference=5e-4 )
def a__ ( self: str )-> str:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
@classmethod
def a__ ( cls: Any )-> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(__a )
@classmethod
def a__ ( cls: Dict )-> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(__a )
def a__ ( self: int )-> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = torch.manual_seed(51 )
lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCamelCase : Dict = """a painting of an elephant with glasses"""
lowerCamelCase : Any = [5, 7]
lowerCamelCase : Tuple = pipe(
prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCamelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 1
|
"""simple docstring"""
import math
class A__ :
"""simple docstring"""
def a__ ( self: str , __a: list[list[float]] , __a: list[int] )-> int:
lowerCamelCase : Dict = 0.0
lowerCamelCase : Tuple = 0.0
for i in range(len(__a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
db += math.pow((sample[i] - weights[1][i]) , 2 )
# the winner is the cluster whose weight vector is closest to the sample
return 0 if da < db else 1
def a__ ( self: Dict , __a: list[list[int | float]] , __a: list[int] , __a: int , __a: float )-> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def snake_case ( ) -> None:
# Training Examples ( m, n )
lowerCamelCase : Union[str, Any] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
lowerCamelCase : int = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
lowerCamelCase : Optional[int] = SelfOrganizingMap()
lowerCamelCase : List[Any] = 3
lowerCamelCase : Tuple = 0.5
for _ in range(epochs ):
for j in range(len(training_samples ) ):
# training sample
lowerCamelCase : Any = training_samples[j]
# Compute the winning vector
lowerCamelCase : Optional[int] = self_organizing_map.get_winner(weights , sample )
# Update the winning vector
lowerCamelCase : List[Any] = self_organizing_map.update(weights , sample , winner , alpha )
# classify test sample
lowerCamelCase : Dict = [0, 0, 0, 1]
lowerCamelCase : Dict = self_organizing_map.get_winner(weights , sample )
# results
print(F'Clusters that the test sample belongs to : {winner}' )
print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
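# Hand-computed example of the winner selection above (illustrative values):
# with the initial weights [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] and the
# test sample [0, 0, 0, 1], the squared distances are
#   d0 = 0.2**2 + 0.6**2 + 0.5**2 + 0.1**2 = 0.66
#   d1 = 0.8**2 + 0.4**2 + 0.7**2 + 0.7**2 = 1.78
# so cluster 0 wins, and only weights[0] is pulled toward the sample by
# alpha * (sample[i] - weights[0][i]) on each update.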
| 42
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Tuple = use_input_mask
lowerCamelCase : int = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[Any] = num_choices
lowerCamelCase : Any = scope
def a__ ( self: Optional[int] )-> List[Any]:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Dict = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Any = None
lowerCamelCase : int = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Union[str, Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
lowerCamelCase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a )
lowerCamelCase : str = model(__a )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
lowerCamelCase : str = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Any = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Any = EsmEmbeddings(config=__a )
lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
lowerCamelCase : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
pass
@require_torch
class A__ ( __lowercase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Union[str, Any]:
with torch.no_grad():
lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Tuple = model(__a )[0]
lowerCamelCase : Dict = 33
lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
lowerCamelCase : Tuple = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def a__ ( self: Dict )-> str:
with torch.no_grad():
lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Any = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
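# Sketch of the position-id convention the two embedding tests above verify
# (assumed semantics of create_position_ids_from_input_ids): non-padding tokens
# are numbered consecutively starting at padding_idx + 1, while padding tokens
# keep padding_idx itself. With padding_idx = 1:
#   input_ids = [12, 31, 13, 1]  -->  position_ids = [2, 3, 4, 1]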
| 42
| 1
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def snake_case ( UpperCamelCase__ : list[float] ) -> np.ndarray:
return np.maximum(0 , UpperCamelCase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
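# The elementwise rule behind np.maximum above, spelled out (a sketch, not part
# of the module): relu(x) = x if x > 0 else 0 per element, so the subgradient
# used in backprop would be
# np.where(np.array([-1, 0, 5]) > 0, 1, 0) # --> [0, 0, 1]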
| 42
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 42
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase :int = logging.get_logger(__name__)
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : int =['''pixel_values''']
def __init__( self: str , __a: bool = True , __a: Dict[str, int] = None , __a: float = None , __a: PILImageResampling = PILImageResampling.BILINEAR , __a: bool = True , __a: Union[int, float] = 1 / 255 , __a: bool = True , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[float, List[float]]] = None , **__a: Optional[int] , )-> None:
super().__init__(**__a )
lowerCamelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 384}
lowerCamelCase : List[str] = get_size_dict(__a , default_to_square=__a )
lowerCamelCase : Tuple = do_resize
lowerCamelCase : Dict = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : List[Any] = crop_pct if crop_pct is not None else 224 / 256
lowerCamelCase : Dict = resample
lowerCamelCase : Any = do_rescale
lowerCamelCase : str = rescale_factor
lowerCamelCase : Optional[Any] = do_normalize
lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self: Dict , __a: np.ndarray , __a: Dict[str, int] , __a: float , __a: PILImageResampling = PILImageResampling.BICUBIC , __a: Optional[Union[str, ChannelDimension]] = None , **__a: Optional[Any] , )-> np.ndarray:
lowerCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
lowerCamelCase : Optional[Any] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : Union[str, Any] = int(shortest_edge / crop_pct )
lowerCamelCase : str = get_resize_output_image_size(__a , size=__a , default_to_square=__a )
lowerCamelCase : Optional[int] = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a )
def a__ ( self: List[Any] , __a: np.ndarray , __a: Union[int, float] , __a: Optional[Union[str, ChannelDimension]] = None , **__a: Tuple , )-> Union[str, Any]:
return rescale(__a , scale=__a , data_format=__a , **__a )
def a__ ( self: Dict , __a: np.ndarray , __a: Union[float, List[float]] , __a: Union[float, List[float]] , __a: Optional[Union[str, ChannelDimension]] = None , **__a: List[Any] , )-> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def a__ ( self: Dict , __a: ImageInput , __a: bool = None , __a: Dict[str, int] = None , __a: float = None , __a: PILImageResampling = None , __a: bool = None , __a: float = None , __a: bool = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[str, TensorType]] = None , __a: ChannelDimension = ChannelDimension.FIRST , **__a: str , )-> PIL.Image.Image:
lowerCamelCase : Dict = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : List[str] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Tuple = size if size is not None else self.size
lowerCamelCase : Any = get_size_dict(__a , default_to_square=__a )
lowerCamelCase : Any = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : int = [to_numpy_array(__a ) for image in images]
if do_resize:
lowerCamelCase : Tuple = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a ) for image in images]
if do_rescale:
lowerCamelCase : Tuple = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
lowerCamelCase : List[str] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
lowerCamelCase : Dict = [to_channel_dimension_format(__a , __a ) for image in images]
lowerCamelCase : int = {"""pixel_values""": images}
return BatchFeature(data=__a , tensor_type=__a )
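# Worked example of the crop_pct branch in resize above (illustrative numbers):
# with shortest_edge = 224 and the default crop_pct = 224 / 256, the image is
# first resized so its shorter side is int(224 / (224 / 256)) = 256 and then
# center-cropped to 224 x 224; for shortest_edge >= 384 the image is instead
# warped directly to (shortest_edge, shortest_edge) with no crop.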
| 42
|
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : Tuple = True
lowerCamelCase : Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
order.append(UpperCamelCase__ )
return order
def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]:
lowerCamelCase : List[Any] = True
lowerCamelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return component
def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]:
lowerCamelCase : int = len(UpperCamelCase__ ) * [False]
lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase__ )
lowerCamelCase : int = []
for i, was_visited in enumerate(UpperCamelCase__ ):
if not was_visited:
order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
lowerCamelCase : str = len(UpperCamelCase__ ) * [False]
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1]
if not visited[vert]:
lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
components_list.append(UpperCamelCase__ )
return components_list
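# Illustrative run of Kosaraju's algorithm above on the second module-level
# graph, {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} (assuming the
# de-obfuscated entry point is the last function defined here): the first DFS
# records a finish-time order on the original graph, the second pass collects
# components on the reversed graph, yielding the strongly connected components
# {0, 1, 2} and {3, 4, 5}.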
| 42
| 1
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowerCamelCase :Dict = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase :Union[str, Any] = 128_022
__lowerCamelCase :Union[str, Any] = 128_028
@require_sentencepiece
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =MaMaaaTokenizer
snake_case__ : Optional[Any] =False
snake_case__ : Any =False
snake_case__ : int =True
def a__ ( self: str )-> Optional[int]:
super().setUp()
lowerCamelCase : Any = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowerCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
lowerCamelCase : str = Path(self.tmpdirname )
save_json(__a , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__a , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
lowerCamelCase : List[str] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Optional[Any] , **__a: Any )-> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: Union[str, Any] , __a: List[Any] )-> Union[str, Any]:
return (
"This is a test",
"This is a test",
)
def a__ ( self: Tuple )-> int:
lowerCamelCase : List[str] = """</s>"""
lowerCamelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase : Optional[Any] = self.get_tokenizer()
lowerCamelCase : List[Any] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<s>""" )
self.assertEqual(len(__a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def a__ ( self: str )-> str:
pass
def a__ ( self: Tuple )-> Dict:
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2, 3, 4, 5, 6] , )
lowerCamelCase : str = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_string(__a )
self.assertEqual(__a , """This is a test""" )
@slow
def a__ ( self: Any )-> List[str]:
# fmt: off
lowerCamelCase : Union[str, Any] = {"""input_ids""": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] ='''facebook/m2m100_418M'''
snake_case__ : Union[str, Any] =[
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
snake_case__ : Union[str, Any] =[
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
snake_case__ : List[Any] =[EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def a__ ( cls: Union[str, Any] )-> List[str]:
lowerCamelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
lowerCamelCase : List[str] = 1
return cls
def a__ ( self: int )-> Any:
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 128_063 )
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Optional[Any] = self.tokenizer.get_vocab()
self.assertEqual(len(__a ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["""<unk>"""] , 3 )
self.assertIn(self.tokenizer.get_lang_token("""en""" ) , __a )
def a__ ( self: Tuple )-> Optional[Any]:
lowerCamelCase : str = """en"""
lowerCamelCase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def a__ ( self: List[Any] )-> int:
self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
lowerCamelCase : Optional[int] = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
lowerCamelCase : Any = self.tokenizer.decode(__a , skip_special_tokens=__a )
lowerCamelCase : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def a__ ( self: List[str] )-> List[str]:
lowerCamelCase : Optional[int] = tempfile.mkdtemp()
lowerCamelCase : Any = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__a )
lowerCamelCase : Tuple = MaMaaaTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.lang_token_to_id , __a )
@require_torch
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[Any] = """en"""
lowerCamelCase : List[str] = """fr"""
lowerCamelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="""pt""" )
lowerCamelCase : str = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowerCamelCase : Optional[Any] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
lowerCamelCase : Optional[Any] = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowerCamelCase : Tuple = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : Any = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowerCamelCase : Any = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : Optional[Any] = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(__a ) , {
# en_XX, A, test, EOS
"""input_ids""": [[128_022, 58, 4_183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 128_006,
} , )
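# Sketch of how the inputs above are consumed downstream (assumed generation
# usage, not asserted in this file): the encoder input is prefixed with the
# source language id (en = 128_022), and passing forced_bos_token_id=128_006
# to model.generate() forces the decoder to emit the ar language id first.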
| 42
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
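# Hedged usage sketch (illustrative addition): it assumes the class above
# mirrors transformers.TimeSeriesTransformerConfig, so the import inside the
# helper is an assumption about the upstream library, not part of this file.
def _demo_time_series_config() -> None:
    from transformers import TimeSeriesTransformerConfig

    config = TimeSeriesTransformerConfig(prediction_length=24)
    assert config.context_length == 24  # falls back to prediction_length
    assert config.d_model == 64  # transformer width default in the signature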
| 42
| 1
|
"""simple docstring"""
import os
def snake_case ( ) -> Optional[Any]:
with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCamelCase__ ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
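# Hedged sketch (illustrative addition, not part of the original solution):
# the same sliding four-term product on a short row, making the loop bounds
# above concrete -- a row of length n leaves n - 3 valid starting positions.
def _demo_row_products(row: list) -> list:
    return [row[j] * row[j + 1] * row[j + 2] * row[j + 3] for j in range(len(row) - 3)]


assert _demo_row_products([1, 2, 3, 4, 5]) == [24, 120]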
| 42
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]:
lowerCamelCase : int = 1
lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Any = int((i / placement) % RADIX )
buckets[tmp].append(UpperCamelCase__ )
        # put each bucket's contents back into list_of_ints
lowerCamelCase : Dict = 0
for b in range(UpperCamelCase__ ):
for i in buckets[b]:
lowerCamelCase : List[str] = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
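# Hedged sketch (illustrative addition): a self-contained restatement of the
# LSD radix-sort loop above with explicit names, so the bucket/placement logic
# can be run on its own. The helper name is hypothetical; 10 matches RADIX.
def _demo_radix_sort(values: list[int]) -> list[int]:
    placement = 1
    while placement <= max(values):
        buckets: list[list[int]] = [[] for _ in range(10)]
        for value in values:
            buckets[(value // placement) % 10].append(value)  # bucket by digit
        values = [value for bucket in buckets for value in bucket]
        placement *= 10  # move to the next digit place
    return values


assert _demo_radix_sort([170, 45, 75, 90, 2, 802, 24, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]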
| 42
| 1
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """ylacombe/bark-small"""
lowerCamelCase : int = tempfile.mkdtemp()
lowerCamelCase : int = """en_speaker_1"""
lowerCamelCase : List[str] = """This is a test string"""
lowerCamelCase : Optional[Any] = """speaker_embeddings_path.json"""
lowerCamelCase : int = """speaker_embeddings"""
def a__ ( self: Optional[Any] , **__a: Any )-> Optional[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **__a )
def a__ ( self: List[str] )-> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : Optional[Any] = self.get_tokenizer()
lowerCamelCase : Optional[int] = BarkProcessor(tokenizer=__a )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : Any = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def a__ ( self: Dict )-> List[str]:
lowerCamelCase : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCamelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : int = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCamelCase : str = 35
lowerCamelCase : Tuple = 2
lowerCamelCase : Any = 8
lowerCamelCase : Dict = {
"""semantic_prompt""": np.ones(__a ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCamelCase : Optional[int] = processor(text=self.input_string , voice_preset=__a )
lowerCamelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(__a , **__a )
lowerCamelCase : Any = processor(text=self.input_string , voice_preset=__a )
lowerCamelCase : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase : Any = processor(text=self.input_string , voice_preset=self.voice_preset )
def a__ ( self: int )-> Union[str, Any]:
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = BarkProcessor(tokenizer=__a )
lowerCamelCase : List[Any] = processor(text=self.input_string )
lowerCamelCase : Union[str, Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
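# Hedged sketch (illustrative addition): the voice-preset npz round-trip the
# test above relies on, in isolation. The helper name is hypothetical; pass a
# path ending in ".npz" so numpy writes and reads the same file.
def _demo_npz_roundtrip(path: str) -> None:
    preset = {"semantic_prompt": np.ones(35), "coarse_prompt": np.ones((2, 35))}
    np.savez(path, **preset)
    loaded = np.load(path)
    for key in preset:
        assert preset[key].tolist() == loaded[key].tolist()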
| 42
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
    # intermediate weights
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
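# Hedged sketch (illustrative addition): the transpose/reshape idiom the
# attention converters above apply, shown with hypothetical dimensions. Trax
# stores one matrix per head; a torch Linear layer expects a flat 2-D weight.
def _demo_attention_reshape() -> None:
    num_heads, d_model, d_head = 2, 8, 4
    trax_weight = torch.randn(num_heads, d_model, d_head)
    torch_weight = trax_weight.transpose(1, 2).contiguous().view(-1, d_model)
    assert torch_weight.shape == (num_heads * d_head, d_model)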
| 42
| 1
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=1024 ) -> Any:
lowerCamelCase , lowerCamelCase : Union[str, Any] = [], []
lowerCamelCase : List[str] = list(zip(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase , lowerCamelCase : List[Any] = sorted_examples[0]
def is_too_big(UpperCamelCase__ : Tuple ):
return tok(UpperCamelCase__ , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
lowerCamelCase : Optional[Any] = new_src + """ """ + src
lowerCamelCase : int = new_tgt + """ """ + tgt
        if is_too_big(UpperCamelCase__ ) or is_too_big(UpperCamelCase__ ): # can't fit, finalize example
finished_src.append(UpperCamelCase__ )
finished_tgt.append(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Tuple = src, tgt
else: # can fit, keep adding
lowerCamelCase , lowerCamelCase : int = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(UpperCamelCase__ )
finished_tgt.append(UpperCamelCase__ )
return finished_src, finished_tgt
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Path , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Optional[int]:
lowerCamelCase : List[str] = Path(UpperCamelCase__ )
save_path.mkdir(exist_ok=UpperCamelCase__ )
for split in ["train"]:
lowerCamelCase , lowerCamelCase : int = data_dir / F'{split}.source', data_dir / F'{split}.target'
lowerCamelCase : Optional[int] = [x.rstrip() for x in Path(UpperCamelCase__ ).open().readlines()]
lowerCamelCase : Optional[Any] = [x.rstrip() for x in Path(UpperCamelCase__ ).open().readlines()]
lowerCamelCase , lowerCamelCase : Union[str, Any] = pack_examples(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print(F'packed {split} split from {len(UpperCamelCase__ )} examples -> {len(UpperCamelCase__ )}.' )
Path(save_path / F'{split}.source' ).open("""w""" ).write("""\n""".join(UpperCamelCase__ ) )
Path(save_path / F'{split}.target' ).open("""w""" ).write("""\n""".join(UpperCamelCase__ ) )
for split in ["val", "test"]:
lowerCamelCase , lowerCamelCase : int = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(UpperCamelCase__ , save_path / F'{split}.source' )
shutil.copyfile(UpperCamelCase__ , save_path / F'{split}.target' )
def snake_case ( ) -> Any:
lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=UpperCamelCase__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=UpperCamelCase__ , default=128 )
parser.add_argument("""--data_dir""" , type=UpperCamelCase__ )
parser.add_argument("""--save_path""" , type=UpperCamelCase__ )
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(UpperCamelCase__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
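# Hedged sketch (illustrative addition): the greedy packing rule above in
# miniature, with a toy "tokenizer" that counts whitespace-separated words.
# All names here are hypothetical.
def _demo_pack(lines, max_tokens):
    packed = []
    current = lines[0]
    for line in lines[1:]:
        candidate = current + " " + line
        if len(candidate.split()) > max_tokens:  # cannot fit: finalize example
            packed.append(current)
            current = line
        else:  # fits: keep accumulating
            current = candidate
    packed.append(current)
    return packed


assert _demo_pack(["a b", "c", "d e f"], max_tokens=3) == ["a b c", "d e f"]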
| 42
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
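# Hedged sketch (illustrative addition): the pre/post-forward contract the
# hooks above implement, reduced to a plain class. _DemoHook is hypothetical
# and only mimics accelerate's ModelHook protocol.
class _DemoHook:
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs  # shift the first input

    def post_forward(self, module, output):
        return output + 1  # shift the output


_demo_args, _demo_kwargs = _DemoHook().pre_forward(None, torch.zeros(2))
assert torch.equal(_demo_args[0], torch.ones(2))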
| 42
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :Any = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowerCamelCase :Tuple = 250_004
__lowerCamelCase :Dict = 250_020
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =MBartTokenizer
snake_case__ : Optional[int] =MBartTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : str =True
def a__ ( self: Tuple )-> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : Optional[int] = MBartTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: List[str] )-> Optional[int]:
lowerCamelCase : List[str] = MBartTokenizer(__a , keep_accents=__a )
lowerCamelCase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a__ ( self: Optional[Any] )-> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase : str = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
lowerCamelCase : str = self.tokenizer_class.from_pretrained(__a , **__a )
lowerCamelCase : Optional[int] = tempfile.mkdtemp()
lowerCamelCase : Tuple = tokenizer_r.save_pretrained(__a )
lowerCamelCase : Optional[Any] = tokenizer_p.save_pretrained(__a )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCamelCase : Optional[int] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(__a )
lowerCamelCase : Optional[int] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
lowerCamelCase : int = tempfile.mkdtemp()
lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__a , legacy_format=__a )
lowerCamelCase : Any = tokenizer_p.save_pretrained(__a )
                # Checks it saves with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
lowerCamelCase : int = tokenizer_r.from_pretrained(__a )
lowerCamelCase : Optional[int] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
lowerCamelCase : Dict = tempfile.mkdtemp()
lowerCamelCase : Any = tokenizer_r.save_pretrained(__a , legacy_format=__a )
lowerCamelCase : List[str] = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase : Tuple = tokenizer_r.from_pretrained(__a )
lowerCamelCase : Dict = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Dict ='''facebook/mbart-large-en-ro'''
snake_case__ : int =[
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
snake_case__ : Optional[int] =[
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
snake_case__ : Optional[int] =[82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def a__ ( cls: int )-> Union[str, Any]:
lowerCamelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
lowerCamelCase : List[Any] = 1
return cls
def a__ ( self: Tuple )-> List[str]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_XX"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
def a__ ( self: Dict )-> Tuple:
lowerCamelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def a__ ( self: Optional[int] )-> Optional[Any]:
self.assertIn(__a , self.tokenizer.all_special_ids )
lowerCamelCase : Union[str, Any] = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
lowerCamelCase : Tuple = self.tokenizer.decode(__a , skip_special_tokens=__a )
lowerCamelCase : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def a__ ( self: List[Any] )-> Any:
lowerCamelCase : str = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __a )
lowerCamelCase : Dict = 10
lowerCamelCase : Union[str, Any] = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def a__ ( self: str )-> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_026, 250_001] )
def a__ ( self: Tuple )-> List[str]:
lowerCamelCase : str = tempfile.mkdtemp()
lowerCamelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
lowerCamelCase : Optional[int] = MBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def a__ ( self: Dict )-> int:
lowerCamelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="""pt""" )
lowerCamelCase : Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a__ ( self: Dict )-> Dict:
lowerCamelCase : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCamelCase : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase : Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def a__ ( self: str )-> Dict:
lowerCamelCase : Any = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="""pt""" )
lowerCamelCase : int = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="""pt""" )
lowerCamelCase : Optional[int] = targets["""input_ids"""]
lowerCamelCase : Optional[int] = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3_034, 2, 250_004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
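# Hedged sketch (illustrative addition): the rotation the shift_tokens_right
# calls above perform -- MBart moves the trailing language code to the front
# so it becomes the decoder start token. Toy ids except RO_CODE; call this
# helper only when torch is installed.
def _demo_shift_tokens_right() -> None:
    import torch

    labels = torch.tensor([[10, 11, 12, 2, RO_CODE]])  # tokens, EOS, lang code
    shifted = shift_tokens_right(labels, 1)  # 1 = pad token id
    assert shifted.tolist() == [[RO_CODE, 10, 11, 12, 2]]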
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
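# Hedged sketch (illustrative addition): a toy stand-in for the deferred
# import that _LazyModule performs above -- attribute access resolves the real
# module on first use, so optional backends load lazily. All names below are
# hypothetical.
import importlib


class _DemoLazyModule:
    def __init__(self, name_to_module):
        self._name_to_module = name_to_module  # e.g. {"sqrt": "math"}

    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)


assert _DemoLazyModule({"sqrt": "math"}).sqrt(9) == 3.0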
| 42
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase :List[Any] = logging.get_logger(__name__)
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] ) -> Dict:
lowerCamelCase : List[Any] = WavaVecaForSequenceClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
lowerCamelCase : Any = downstream_dict["""projector.weight"""]
lowerCamelCase : Dict = downstream_dict["""projector.bias"""]
lowerCamelCase : Dict = downstream_dict["""model.post_net.linear.weight"""]
lowerCamelCase : Optional[int] = downstream_dict["""model.post_net.linear.bias"""]
return model
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> Tuple:
lowerCamelCase : Any = WavaVecaForAudioFrameClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
lowerCamelCase : Dict = downstream_dict["""model.linear.weight"""]
lowerCamelCase : Tuple = downstream_dict["""model.linear.bias"""]
return model
def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[Any]:
lowerCamelCase : Optional[Any] = WavaVecaForXVector.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
lowerCamelCase : str = downstream_dict["""connector.weight"""]
lowerCamelCase : Any = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowerCamelCase : Optional[int] = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
lowerCamelCase : List[Any] = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
lowerCamelCase : Union[str, Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
lowerCamelCase : Optional[int] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
lowerCamelCase : Tuple = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
lowerCamelCase : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
lowerCamelCase : List[Any] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> Optional[int]:
lowerCamelCase : int = torch.load(UpperCamelCase__ , map_location="""cpu""" )
lowerCamelCase : List[str] = checkpoint["""Downstream"""]
lowerCamelCase : int = WavaVecaConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : int = WavaVecaFeatureExtractor.from_pretrained(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
lowerCamelCase : Any = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
lowerCamelCase : List[str] = convert_classification(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("""ForAudioFrameClassification""" ):
lowerCamelCase : Tuple = convert_diarization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("""ForXVector""" ):
lowerCamelCase : str = convert_xvector(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
lowerCamelCase : str = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
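# Hedged sketch (illustrative addition): the suffix-based dispatch used above,
# restated in isolation -- the conversion routine is picked from the config's
# architecture name. The helper is a toy, not the real conversion API.
def _demo_dispatch(arch: str) -> str:
    for suffix, kind in (
        ("ForSequenceClassification", "classification"),
        ("ForAudioFrameClassification", "diarization"),
        ("ForXVector", "xvector"),
    ):
        if arch.endswith(suffix):
            return kind
    raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")


assert _demo_dispatch("WavaVecaForXVector") == "xvector"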
| 42
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
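# A minimal usage sketch of the checkpoint the integration test above verifies.
# Assumptions: torch/transformers are installed, the checkpoint can be downloaded,
# and `prepare_img` is the fixture helper defined earlier in this file;
# `convnext_top1_demo` is a hypothetical helper name, not part of the test suite.
def convnext_top1_demo() -> str:
    from transformers import AutoImageProcessor, ConvNextForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
    model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # map the argmax class index back to its ImageNet label string
    return model.config.id2label[logits.argmax(-1).item()]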
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase :Dict = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[str] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__lowerCamelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
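# A minimal standalone sketch of the lazy-module idea used above (hypothetical
# `TinyLazyModule`, not the transformers implementation): attribute access
# triggers the real submodule import the first time a symbol is needed.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        return getattr(submodule, attr)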
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
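# A minimal round-trip sketch, assuming transformers is installed: like every
# PretrainedConfig subclass, RealmConfig serializes to a dict/JSON and reloads
# losslessly, which is how checkpoints pin their hyperparameters.
# `realm_config_roundtrip` is a hypothetical helper name.
def realm_config_roundtrip() -> str:
    from transformers import RealmConfig

    config = RealmConfig(num_hidden_layers=6)
    restored = RealmConfig.from_dict(config.to_dict())
    assert restored.num_hidden_layers == 6
    return restored.to_json_string()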
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase :List[str] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[str] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :str = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
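# The try/except blocks above all follow one availability-gating pattern: probe
# for an optional backend, and only then extend the public import structure.
# A standalone sketch of one probe (hypothetical helper name):
def backend_available(module_name: str) -> bool:
    import importlib.util

    return importlib.util.find_spec(module_name) is not None


# e.g. backend_available("torch") plays the role of is_torch_available() here.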
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that the round robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished processes are in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes are in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than current time, advance current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated processes
        # just for 1 cycle; unfinished processes go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than current time, advance current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use CPU for only the time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # place the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first come first served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}")
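# A quick sanity sketch using the classes above (hypothetical demo helper):
# after the round-robin passes, the FCFS stage drains whatever is left, so
# every scheduled process must end with zero remaining burst time.
def _mlfq_sanity_demo() -> None:
    demo = MLFQ(3, [17, 25], deque([Process("A", 0, 5), Process("B", 0, 30)]), 0)
    demo.multi_level_feedback_queue()
    assert all(p.burst_time == 0 for p in demo.finish_queue)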
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("""enter the numbers of values: """))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("""enter the values of parameters in a list: """)
    x = list(map(int, input().split()))

    print("""enter the values of corresponding parameters: """)
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("""enter the value to interpolate: """))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
main()
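# A non-interactive sketch of the same Newton forward-difference formula
# (hypothetical helper, reusing ucal/math from above): x must be equally
# spaced and y_values holds the known function values at those points.
def newton_forward(x: list[float], y_values: list[float], value: float) -> float:
    n = len(x)
    table = [[v] + [0.0] * (n - 1) for v in y_values]
    # build the forward difference table column by column
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    result = table[0][0]
    for i in range(1, n):
        result += ucal(u, i) * table[0][i] / math.factorial(i)
    return result


# newton_forward([0, 1, 2, 3], [1, 2, 4, 8], 1.5) returns 2.8125 (vs 2**1.5 ≈ 2.828)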
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase :int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__lowerCamelCase :Dict = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__lowerCamelCase :Any = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
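# Mini-demo of the helper above: get_pairs yields the adjacent symbol pairs
# that the `bpe` method below repeatedly ranks against the merge table.
assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}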
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict =VOCAB_FILES_NAMES
snake_case__ : Dict =PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[Any] =['''input_ids''', '''attention_mask''']
def __init__( self: Optional[Any] , __a: Optional[Any] , __a: List[Any] , __a: Dict="replace" , __a: Optional[Any]="<s>" , __a: List[Any]="</s>" , __a: List[str]="</s>" , __a: List[str]="<s>" , __a: int="<unk>" , __a: Optional[int]="<pad>" , __a: Tuple="<mask>" , __a: Optional[Any]=False , **__a: Dict , )-> List[str]:
lowerCamelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
lowerCamelCase : Union[str, Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
lowerCamelCase : Optional[int] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
lowerCamelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
lowerCamelCase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
lowerCamelCase : Union[str, Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : Union[str, Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
with open(__a , encoding="""utf-8""" ) as vocab_handle:
lowerCamelCase : Optional[int] = json.load(__a )
lowerCamelCase : int = {v: k for k, v in self.encoder.items()}
lowerCamelCase : Union[str, Any] = errors # how to handle errors in decoding
lowerCamelCase : Any = bytes_to_unicode()
lowerCamelCase : int = {v: k for k, v in self.byte_encoder.items()}
with open(__a , encoding="""utf-8""" ) as merges_handle:
lowerCamelCase : Any = merges_handle.read().split("""\n""" )[1:-1]
lowerCamelCase : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase : Dict = dict(zip(__a , range(len(__a ) ) ) )
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase : Dict = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def a__ ( self: Union[str, Any] )-> List[Any]:
return len(self.encoder )
def a__ ( self: int )-> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self: str , __a: List[Any] )-> Union[str, Any]:
if token in self.cache:
return self.cache[token]
lowerCamelCase : Any = tuple(__a )
lowerCamelCase : Dict = get_pairs(__a )
if not pairs:
return token
while True:
lowerCamelCase : List[str] = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase , lowerCamelCase : Optional[Any] = bigram
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : Optional[Any] = 0
while i < len(__a ):
try:
lowerCamelCase : Optional[Any] = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase : Any = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase : Optional[int] = tuple(__a )
lowerCamelCase : List[str] = new_word
if len(__a ) == 1:
break
else:
lowerCamelCase : Union[str, Any] = get_pairs(__a )
lowerCamelCase : Union[str, Any] = """ """.join(__a )
lowerCamelCase : Dict = word
return word
def a__ ( self: str , __a: Union[str, Any] )-> Any:
lowerCamelCase : int = []
for token in re.findall(self.pat , __a ):
lowerCamelCase : str = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(""" """ ) )
return bpe_tokens
def a__ ( self: Optional[Any] , __a: List[str] )-> Optional[Any]:
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def a__ ( self: Union[str, Any] , __a: List[Any] )-> Tuple:
return self.decoder.get(__a )
def a__ ( self: Any , __a: List[str] )-> Any:
lowerCamelCase : Union[str, Any] = """""".join(__a )
lowerCamelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def a__ ( self: List[Any] , __a: str , __a: Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase : int = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase : Optional[int] = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + """\n""" )
lowerCamelCase : List[Any] = 0
with open(__a , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
lowerCamelCase : Tuple = token_index
writer.write(""" """.join(__a ) + """\n""" )
index += 1
return vocab_file, merge_file
def a__ ( self: List[Any] , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase : Any = [self.cls_token_id]
lowerCamelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self: Any , __a: List[int] , __a: Optional[List[int]] = None , __a: bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def a__ ( self: Tuple , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
lowerCamelCase : int = [self.sep_token_id]
lowerCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self: Optional[Any] , __a: Tuple , __a: int=False , **__a: Any )-> Tuple:
lowerCamelCase : Optional[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
lowerCamelCase : str = """ """ + text
return (text, kwargs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase :str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCamelCase :Optional[int] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def snake_case ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ) -> List[str]:
if args.student_type == "roberta":
lowerCamelCase : List[Any] = False
elif args.student_type == "gpt2":
lowerCamelCase : Optional[int] = False
def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ) -> Optional[int]:
if args.student_type == "roberta":
lowerCamelCase : str = False
def snake_case ( ) -> Optional[Any]:
lowerCamelCase : Optional[Any] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=UpperCamelCase__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=UpperCamelCase__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=UpperCamelCase__ , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=UpperCamelCase__ , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=UpperCamelCase__ , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=UpperCamelCase__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=UpperCamelCase__ , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=UpperCamelCase__ , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=UpperCamelCase__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.1_5 , type=UpperCamelCase__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=UpperCamelCase__ , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=UpperCamelCase__ , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=UpperCamelCase__ , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=UpperCamelCase__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=UpperCamelCase__ , help="""The token counts in the data_file for MLM.""" )
    parser.add_argument(
        """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only on the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=UpperCamelCase__ , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=UpperCamelCase__ , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=UpperCamelCase__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.0_5 , type=UpperCamelCase__ , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=UpperCamelCase__ , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=UpperCamelCase__ , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=UpperCamelCase__ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=UpperCamelCase__ , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.0_2 , type=UpperCamelCase__ , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=UpperCamelCase__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=UpperCamelCase__ , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=UpperCamelCase__ , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=UpperCamelCase__ , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=UpperCamelCase__ , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=UpperCamelCase__ , default=4000 , help="""Checkpoint interval.""" )
lowerCamelCase : Union[str, Any] = parser.parse_args()
sanity_checks(UpperCamelCase__ )
# ARGS #
init_gpu_params(UpperCamelCase__ )
set_seed(UpperCamelCase__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    """ it. Use `--force` if you want to overwrite it.""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(UpperCamelCase__ ) , UpperCamelCase__ , indent=4 )
git_log(args.dump_path )
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = MODEL_CLASSES[args.student_type]
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCamelCase : Union[str, Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCamelCase : List[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCamelCase : Union[str, Any] = tokenizer.all_special_tokens.index(UpperCamelCase__ )
lowerCamelCase : int = tokenizer.all_special_ids[idx]
logger.info(F'Special tokens {special_tok_ids}' )
lowerCamelCase : Any = special_tok_ids
lowerCamelCase : int = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'Loading data from {args.data_file}' )
with open(args.data_file , """rb""" ) as fp:
lowerCamelCase : Optional[int] = pickle.load(UpperCamelCase__ )
if args.mlm:
logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , """rb""" ) as fp:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )
lowerCamelCase : List[Any] = np.maximum(UpperCamelCase__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCamelCase : Union[str, Any] = 0.0 # do not predict special tokens
lowerCamelCase : List[str] = torch.from_numpy(UpperCamelCase__ )
else:
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : int = LmSeqsDataset(params=UpperCamelCase__ , data=UpperCamelCase__ )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F'Loading student config from {args.student_config}' )
lowerCamelCase : List[Any] = student_config_class.from_pretrained(args.student_config )
lowerCamelCase : Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' )
lowerCamelCase : str = student_model_class.from_pretrained(args.student_pretrained_weights , config=UpperCamelCase__ )
else:
lowerCamelCase : Optional[Any] = student_model_class(UpperCamelCase__ )
if args.n_gpu > 0:
student.to(F'cuda:{args.local_rank}' )
logger.info("""Student loaded.""" )
# TEACHER #
lowerCamelCase : str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=UpperCamelCase__ )
if args.n_gpu > 0:
teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(UpperCamelCase__ , UpperCamelCase__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(UpperCamelCase__ , UpperCamelCase__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCamelCase : Dict = Distiller(
params=UpperCamelCase__ , dataset=UpperCamelCase__ , token_probs=UpperCamelCase__ , student=UpperCamelCase__ , teacher=UpperCamelCase__ )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
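# The alpha_* flags parsed above linearly weight the individual distillation
# losses; the sanity checks only require non-negative weights with a positive
# sum. A sketch of that combination (hypothetical names; the real computation
# lives in distiller.py, which is not shown here):
def combine_losses(weights: dict, losses: dict) -> float:
    return sum(weights[name] * losses[name] for name in weights)


# combine_losses({"ce": 0.5, "clm": 0.5}, {"ce": 2.0, "clm": 4.0}) == 3.0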
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
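# convert_weight_and_push above copies weights positionally: it assumes the
# timm and HF state dicts enumerate the same parameters in the same order.
# A toy illustration of that remapping (made-up key names, hypothetical helper):
def _positional_remap_demo() -> dict:
    import torch

    source = {"patch_embed.weight": torch.ones(1), "head.weight": torch.zeros(1)}
    target_keys = ["embeddings.weight", "classifier.weight"]
    remapped = dict(zip(target_keys, source.values()))
    assert list(remapped) == target_keys
    return remapped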
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :Any = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[Any] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :str = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[Any] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
snake_case__ : Tuple =10
def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
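# The tests above all run the generic diffusers sampling loop, sketched here
# with placeholder arguments (scheduler and model as in the tests): scale the
# sample for the current sigma, predict the noise, then take one reverse step.
def run_scheduler_loop(scheduler, model, sample):
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = model(model_input, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample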
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
lowerCamelCase : int = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError("""Not supported""" )
    return shapes
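# Quick example of the traversal above: for a nested input it collects every
# leaf tensor's shape in order.
#
#   _fetch_dims({"a": torch.zeros(2, 3), "b": [torch.zeros(4)]})
#   # -> [torch.Size([2, 3]), torch.Size([4])]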
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int , dims: Tuple[int, ...] ) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
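# Example: the helper above inverts row-major flattening. For a batch of
# shape (2, 3), flat offset 5 is the last element, i.e. multi-index (1, 2).
#
#   _flat_idx_to_idx(5, (2, 3))  # -> (1, 2)
#   _flat_idx_to_idx(0, (2, 3))  # -> (0, 0)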
@torch.jit.ignore
def _get_minimal_slice_set(start: Sequence[int] , end: Sequence[int] , dims: Sequence[int] , start_edges: Optional[Sequence[bool]] = None , end_edges: Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool] ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges )
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims )]
        reduce_edge_list(end_edges )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start ) == 0:
        return [()]
    elif len(start ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start , end ):
        if s == e:
            path_list.append(slice(s , s + 1 ) )
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list )
    divergence_idx = len(path_list )
    # start == end, and we're done
    if divergence_idx == len(start ):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
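# Worked example: in a (3, 4) batch, the inclusive range from index (0, 2) to
# (2, 1) decomposes into a ragged head row, one full middle row, and a ragged
# tail row.
#
#   _get_minimal_slice_set((0, 2), (2, 1), (3, 4))
#   # -> [(slice(0, 1), slice(2, 4)), (slice(1, 2),), (slice(2, 3), slice(0, 2))]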
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor , flat_start: int , flat_end: int , no_batch_dims: int ) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
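# Example: with a tensor of batch shape (2, 3) and no trailing dims, gathering
# flat rows 1..4 picks out the middle of the flattened view without
# materializing a full reshape first.
#
#   t = torch.arange(6).view(2, 3)
#   _chunk_slice(t, flat_start=1, flat_end=5, no_batch_dims=2)
#   # -> tensor([1, 2, 3, 4])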
def snake_case ( layer: Callable , inputs: Dict[str, Any] , chunk_size: int , no_batch_dims: int , low_mem: bool = False , _out: Any = None , _add_into_out: bool = False , ) -> Any:
    if not (len(inputs ) > 0):
        raise ValueError("""Must provide at least one input""" )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )
    def _prep_inputs(t: torch.Tensor ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks: Dict[str, Any] = tensor_tree_map(select_chunk , prepped_inputs )
        # Run the layer on the chunk
        output_chunk = layer(**chunks )
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
# Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):

            def assign(d1: dict , d2: dict ) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict ):
                        assign(v , d2[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("""Not supported""" )
i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
return out
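# Usage sketch (hedged: upstream, in the OpenFold utilities, this function is
# named `chunk_layer`; the anonymized def above keeps the same behavior). It
# flattens the leading batch dims, runs `layer` on chunk_size-sized slices,
# and stitches the outputs back together:
#
#   layer = lambda x: {"y": x * 2}
#   inputs = {"x": torch.ones(4, 3, 8)}        # batch dims (4, 3)
#   out = chunk_layer(layer, inputs, chunk_size=5, no_batch_dims=2)
#   # out["y"].shape == (4, 3, 8)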
class A__ :
"""simple docstring"""
def __init__( self: List[str] , __a: int = 512 , )-> Optional[int]:
lowerCamelCase : List[Any] = max_chunk_size
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[tuple] = None
    def _determine_favorable_chunk_size( self: Union[str, Any] , fn: Callable , args: tuple , min_chunk_size: int )-> int:
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
candidates[-1] += 4
        def test_chunk_size(chunk_size: int ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
return candidates[min_viable_chunk_size_index]
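    # Example of the search above: with max_chunk_size = 512 and
    # min_chunk_size = 3, the candidate list is [3, 4, 8, 16, 32, 64, 128,
    # 256, 516] (the final power of two gets +4 of headroom), and the loop
    # binary-searches for the largest candidate whose trial run does not
    # raise a RuntimeError, i.e. does not run out of memory.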
    def _compare_arg_caches( self: Dict , ac: Iterable , ac_n: Iterable )-> bool:
        consistent = True
        for a1, a2 in zip(ac , ac_n ):
            assert type(a1 ) == type(a2 )
            if isinstance(a1 , (list, tuple) ):
                consistent &= self._compare_arg_caches(a1 , a2 )
            elif isinstance(a1 , dict ):
                a1_items = [v for _, v in sorted(a1.items() , key=lambda x : x[0] )]
                a2_items = [v for _, v in sorted(a2.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(a1_items , a2_items )
            else:
                consistent &= a1 == a2
return consistent
    def a__ ( self: Tuple , representative_fn: Callable , args: tuple , min_chunk_size: int , )-> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
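# Usage sketch (hedged: upstream this class is OpenFold's ChunkSizeTuner, and
# the hypothetical names below come from there). Callers probe once per
# argument-shape signature; the result is cached and only re-tuned when the
# argument shapes change.
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(representative_fn, args, min_chunk_size=1)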
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]:
return None
class A__ :
"""simple docstring"""
def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple:
return None
class A__ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =[
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self: Optional[Any] )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: str )-> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 12 , **__a )
@require_torch
@slow
def a__ ( self: Union[str, Any] )-> Dict:
from transformers import BertModel
lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__a ) )
vocab_file.flush()
lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , """pt""" , 12 , __a )
@require_tf
@slow
def a__ ( self: Optional[Any] )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
lowerCamelCase : Tuple = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self: Any )-> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
lowerCamelCase : Dict = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def a__ ( self: Tuple )-> Dict:
from transformers import BertModel
lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self: Optional[Any] )-> List[Any]:
from transformers import TFBertModel
lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__a , __a , """tf""" )
def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self: List[Any] )-> int:
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
        # Parameters should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
        # Should have exactly one arg (everything before the first arg that is not provided, "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return shaaaa(full_bytes ).hexdigest()
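# Example: the hash ignores comment-only and blank lines, so these two line
# lists fingerprint identically.
#
#   _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) \
#       == _hash_python_lines(["x = 1", "y = 2"])  # True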
# get importable module names and hash for caching
__lowerCamelCase :List[Any] = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__lowerCamelCase :str = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowerCamelCase :Any = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
__lowerCamelCase :Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
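# Example of consulting the tables above: resolve a data file's extension to
# its packaged loader module plus default builder kwargs.
#
#   _EXTENSION_TO_MODULE[".tsv"]                    # -> ("csv", {"sep": "\t"})
#   ".csv" in _MODULE_TO_EXTENSIONS["csv"]          # -> True
#   ".zip" in _MODULE_TO_EXTENSIONS["audiofolder"]  # -> True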
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
def a__ ( self: str )-> str:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
        self.assertRaisesRegex(ValueError , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
        self.assertRaisesRegex(ValueError , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
        self.assertRaisesRegex(ValueError , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
self.assertRaisesRegex(
            ValueError , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__lowerCamelCase :Optional[int] = 500_000
__lowerCamelCase , __lowerCamelCase :int = os.path.split(__file__)
__lowerCamelCase :Any = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def snake_case ( UpperCamelCase__ : datasets.Dataset , **UpperCamelCase__ : Dict ) -> List[Any]:
lowerCamelCase : Optional[Any] = dataset.map(**UpperCamelCase__ )
@get_duration
def snake_case ( UpperCamelCase__ : datasets.Dataset , **UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase : Optional[int] = dataset.filter(**UpperCamelCase__ )
def snake_case ( ) -> Any:
lowerCamelCase : Tuple = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : str = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(UpperCamelCase__ , """dataset.arrow""" ) , UpperCamelCase__ , num_examples=UpperCamelCase__ )
lowerCamelCase : Tuple = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCamelCase__ )
        def tokenize(examples: Dict ):
return tokenizer(examples["""text"""] )
lowerCamelCase : Any = map(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = map(UpperCamelCase__ , batched=UpperCamelCase__ )
lowerCamelCase : List[str] = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type="""numpy""" ):
lowerCamelCase : Dict = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type="""pandas""" ):
lowerCamelCase : List[Any] = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowerCamelCase : str = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowerCamelCase : Tuple = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
lowerCamelCase : Tuple = map(UpperCamelCase__ , function=UpperCamelCase__ , batched=UpperCamelCase__ )
lowerCamelCase : List[str] = filter(UpperCamelCase__ )
        # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(UpperCamelCase__ , """wb""" ) as f:
f.write(json.dumps(UpperCamelCase__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
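# Note: with the _LazyModule indirection above, importing this package stays
# cheap; a symbol such as OwlViTModel only triggers the heavy torch-dependent
# import on first attribute access, while the TYPE_CHECKING branch exists
# purely so static type checkers can see the real symbols.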
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
lowerCamelCase : Dict = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Union[str, Any] = image_size
lowerCamelCase : Optional[int] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : Any = embed_dim
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Tuple = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : str = qkv_bias
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = drop_path_rate
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Tuple = use_absolute_embeddings
lowerCamelCase : List[str] = patch_norm
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : str = initializer_range
lowerCamelCase : Tuple = is_training
lowerCamelCase : int = scope
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : str = encoder_stride
lowerCamelCase : List[str] = out_features
lowerCamelCase : Optional[int] = out_indices
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def a__ ( self: List[Any] )-> Optional[int]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Tuple = model(__a )
lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase : Dict = None
lowerCamelCase : Dict = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self: int )-> Optional[int]:
lowerCamelCase : str = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def a__ ( self: Union[str, Any] )-> Optional[int]:
lowerCamelCase : List[str] = FocalNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def a__ ( self: List[str] )-> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: List[str] )-> Union[str, Any]:
return
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[Any] )-> Dict:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: List[Any] )-> Tuple:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : int = model_class(__a )
lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
lowerCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : List[str] = outputs.hidden_states
lowerCamelCase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self: Any )-> Any:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : List[str] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
def a__ ( self: str )-> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = 3
lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase : str = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def a__ ( self: Optional[int] )-> List[Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> Any:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : int = _config_zero_init(__a )
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Optional[int] )-> Optional[Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )


def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
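# Worked example: for the sequence [1, 2] the recursion prints the
# subsequences in exclude-first order:
#
#   []        (skip 1, skip 2)
#   [2]       (skip 1, take 2)
#   [1]       (take 1, skip 2)
#   [1, 2]    (take 1, take 2)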
if __name__ == "__main__":
__lowerCamelCase :list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
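# Note on the loop bounds above: a product of four adjacent cells needs
# indices i..i+3 (or j..j+3) to stay inside the 20x20 grid, hence range(17);
# the anti-diagonal pass starts at column 3 so that j - 3 >= 0.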
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image , level: float ) -> Image:
    def brightness(c: int ) -> float:
        return 128 + level + (c - 128)

    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
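# Worked example: the point transform simplifies to c + level, so with
# level = 100 a pixel value of 50 maps to 128 + 100 + (50 - 128) = 150.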
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
__lowerCamelCase :Optional[Any] = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
snake_case__ : Any =False
snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ ( cls: Dict )-> Tuple:
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def a__ ( cls: Union[str, Any] )-> Any:
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def a__ ( self: Tuple )-> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
lowerCamelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase : Optional[int] = CLIPTextModel(__a )
lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Tuple = torch.manual_seed(__a )
else:
lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def a__ ( self: Dict )-> str:
lowerCamelCase : Tuple = """cpu"""
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : Union[str, Any] = pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCamelCase : Optional[Any] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        torch.use_deterministic_algorithms(True)  # assumed True; the original boolean was lost in this dump

    @classmethod
    def tearDownClass(cls) -> None:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)  # assumed False; the original boolean was lost in this dump

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula Z^2 = R^2 + X^2: given any two of
    resistance (R), reactance (X) and impedance (Z), with the unknown quantity
    passed as 0, solve for the missing one.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
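# Quick sanity check of the formula above: with R = 3 and X = 4,
# electrical_impedance(3, 4, 0) -> {'impedance': 5.0}, since Z = sqrt(3**2 + 4**2).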
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False  # flag name reconstructed; the original attribute name was lost in this dump
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True  # flag name reconstructed; the original attribute name was lost in this dump
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
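    # The expected tensor above documents the ESM/RoBERTa position convention:
    # real tokens are numbered upward from padding_idx + 1, while every padding
    # position is pinned to padding_idx and never advances the counter.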
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # overridden test name assumed; the original was lost in this dump
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
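    # With the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    # this ratio is 5 * 2**6 = 320: each output frame of the feature extractor
    # covers 320 raw audio samples, i.e. 20 ms at a 16 kHz sampling rate.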
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
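    # The two asserts above pin down ALBERT's special-token layout:
    #   single sequence: [CLS] A [SEP]
    #   sequence pair:   [CLS] A [SEP] B [SEP]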
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =LEDTokenizer
snake_case__ : Any =LEDTokenizerFast
snake_case__ : Optional[int] =True
def a__ ( self: Union[str, Any] )-> int:
super().setUp()
lowerCamelCase : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCamelCase : List[Any] = dict(zip(__a , range(len(__a ) ) ) )
lowerCamelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCamelCase : List[Any] = {"""unk_token""": """<unk>"""}
lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def a__ ( self: List[Any] , **__a: Optional[Any] )-> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: int , **__a: Optional[int] )-> Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def a__ ( self: Dict , __a: int )-> Any:
return "lower newer", "lower newer"
@cached_property
def a__ ( self: Dict )-> List[Any]:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def a__ ( self: Optional[Any] )-> int:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def a__ ( self: List[Any] )-> int:
lowerCamelCase : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase : Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase : Dict = tokenizer(__a , max_length=len(__a ) , padding=__a , return_tensors="""pt""" )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(__a , __a )
@require_torch
def a__ ( self: Any )-> Any:
lowerCamelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase : int = tokenizer(__a , padding=__a , return_tensors="""pt""" )
self.assertIn("""input_ids""" , __a )
self.assertIn("""attention_mask""" , __a )
self.assertNotIn("""labels""" , __a )
self.assertNotIn("""decoder_attention_mask""" , __a )
@require_torch
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : Any = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase : Any = tokenizer(text_target=__a , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def a__ ( self: int )-> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase : List[str] = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=__a , truncation=__a , return_tensors="""pt""" )
self.assertIsInstance(__a , __a )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def a__ ( self: Tuple )-> Optional[Any]:
lowerCamelCase : Tuple = ["""A long paragraph for summarization."""]
lowerCamelCase : Any = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase : str = tokenizer(__a , return_tensors="""pt""" )
lowerCamelCase : str = tokenizer(text_target=__a , return_tensors="""pt""" )
lowerCamelCase : Dict = inputs["""input_ids"""]
lowerCamelCase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def a__ ( self: Optional[Any] )-> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase : List[str] = ["""Summary of the text.""", """Another summary."""]
lowerCamelCase : Tuple = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase : Optional[int] = tokenizer(__a , padding=__a )
lowerCamelCase : Any = [[0] * len(__a ) for x in encoded_output["""input_ids"""]]
lowerCamelCase : Union[str, Any] = tokenizer.pad(__a )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , __a )
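    # Grounded in the expected value above: LED's pad() also pads the extra
    # "global_attention_mask" key, filling the padded tail of shorter rows with
    # -1 (the unpadded rows themselves are all 0, i.e. local attention only).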
def a__ ( self: int )-> Optional[int]:
pass
def a__ ( self: Any )-> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
lowerCamelCase : int = """A, <mask> AllenNLP sentence."""
lowerCamelCase : Optional[Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
lowerCamelCase : Tuple = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowerCamelCase : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowerCamelCase : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """DFS from `vert`, returning vertices in increasing finish-time order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: one DFS pass for finish order, one on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
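# Worked example for the routine above, using test_graph_1: the first DFS pass
# yields a finish-time order which is consumed in reverse, and each DFS tree in
# the reversed graph is one component, so
# strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]].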
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Knuth-Morris-Pratt matching."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix, the length of the longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
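# Reading the failure array: failure[k] is the length of the longest proper
# prefix of pattern[:k + 1] that is also its suffix. For "ABABX" it is
# [0, 0, 1, 2, 0], so a mismatch after matching "ABAB" resumes comparing at
# pattern index 2 instead of restarting from 0.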
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
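    # Example with the defaults: input_size=1 with 7 lags gives 7 lagged values,
    # and _number_of_features adds sum(embedding_dimension)=0, no dynamic/time/
    # static-real features, plus the 2 scaling features, so the model consumes
    # feature vectors of size 7 + 2 = 9.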
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """
    A generic heap backed by a position map: usable as a max heap by default, or
    as a min heap by passing key=lambda x: -x. The pos_map (item -> index) makes
    update_item and delete_item possible in O(log n).
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Swaps two heap slots, keeping pos_map consistent."""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares two heap slots by their computed key values."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should sit at `i` among {i, left(i), right(i)}."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Restores the heap invariant upward from `index`."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Restores the heap invariant downward from `index`."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top [item, value] pair, or None if the heap is empty."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns and removes the top [item, value] pair, or None if empty."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
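# Minimal usage sketch for the Heap above (default key, i.e. a max heap):
#   h = Heap()
#   h.insert_item(5, 34)
#   h.insert_item(6, 31)
#   h.insert_item(7, 37)
#   h.get_top()      # [7, 37]
#   h.delete_item(7)
#   h.get_top()      # [5, 34]
# Passing key=lambda x: -x instead orders the heap as a min heap.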
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Least-significant-digit radix sort; mutates and returns list_of_ints."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
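# Worked example: radix_sort([15, 3, 250, 2]) distributes by the 1s digit into
# [250, 2, 3, 15], then by the 10s digit into [2, 3, 15, 250], which the 100s
# pass leaves unchanged. Note that the digit extraction
# int((i / placement) % RADIX) assumes non-negative integers.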
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # One forward reference per level of the skip list this node appears in.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references, i.e. the height of this node."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level: keep growing while random() < p, capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
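# With p = 0.5, node heights follow a geometric distribution: the expected
# number of forward pointers per node is 1 / (1 - p) = 2, and search, insert
# and delete all take O(log n) time in expectation.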
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
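# The helpers below all funnel through set_param: each trax weight arrives as a
# numpy array, and the transpose/contiguous/view chains reshape it into the 2D
# (out_features, in_features) layout of the matching torch module before the
# shape-checked copy above is applied.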
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):  # `tuple` assumed; the original type check was lost in this dump
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)