code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def _lowercase( __a : Optional[Any] , __a : Optional[int] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
a__ =(boundary[1] - boundary[0]) / steps
a__ =boundary[0]
a__ =boundary[1]
a__ =make_points(__a , __a , __a )
a__ =0.0
y += (h / 2.0) * f(__a )
for i in x_i:
# print(i)
y += h * f(__a )
y += (h / 2.0) * f(__a )
return y
def _lowercase( __a : Tuple , __a : str , __a : Union[str, Any] ):
a__ =a + h
while x < (b - h):
yield x
a__ =x + h
def _lowercase( __a : Dict ): # enter your function here
a__ =(x - 0) * (x - 0)
return y
def _lowercase( ):
a__ =0.0 # Lower bound of integration
a__ =1.0 # Upper bound of integration
a__ =10.0 # define number of steps or resolution
a__ =[a, b] # define boundary of integration
a__ =method_a(__a , __a )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 20 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
lowercase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
lowercase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
lowercase_ = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
lowercase_ = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
lowercase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
lowercase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 669 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A ( UpperCamelCase__ ):
@slow
@require_torch
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__magic_name__ : int =BertTokenizer.from_pretrained("""bert-base-uncased""" )
__magic_name__ : str =bertabert.config.encoder.vocab_size
__magic_name__ : Union[str, Any] =tokenizer.sep_token_id
__magic_name__ : Optional[Any] =tokenizer.cls_token_id
__magic_name__ : str =1_28
__magic_name__ : List[str] =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__magic_name__ : Union[str, Any] =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__magic_name__ : Union[str, Any] =train_dataset.select(range(32 ) )
__magic_name__ : str =val_dataset.select(range(16 ) )
__magic_name__ : int =4
def _map_to_encoder_decoder_inputs(__snake_case :Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__magic_name__ : Dict =tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__snake_case , max_length=5_12 )
__magic_name__ : Dict =tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__snake_case , max_length=1_28 )
__magic_name__ : Optional[int] =inputs.input_ids
__magic_name__ : Tuple =inputs.attention_mask
__magic_name__ : Any =outputs.input_ids
__magic_name__ : Tuple =outputs.input_ids.copy()
__magic_name__ : Union[str, Any] =[
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__magic_name__ : List[str] =outputs.attention_mask
assert all(len(__snake_case ) == 5_12 for x in inputs.input_ids )
assert all(len(__snake_case ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(__snake_case :Tuple ):
__magic_name__ : Tuple =pred.label_ids
__magic_name__ : Any =pred.predictions
# all unnecessary tokens are removed
__magic_name__ : Optional[int] =tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
__magic_name__ : List[Any] =tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
__magic_name__ : Optional[Any] =sum([int(pred_str[i] == label_str[i] ) for i in range(len(__snake_case ) )] ) / len(__snake_case )
return {"accuracy": accuracy}
# map train dataset
__magic_name__ : Dict =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__snake_case , batch_size=__snake_case , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__magic_name__ : List[str] =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__snake_case , batch_size=__snake_case , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__magic_name__ : Tuple =self.get_auto_remove_tmp_dir()
__magic_name__ : Optional[int] =SeqaSeqTrainingArguments(
output_dir=__snake_case , per_device_train_batch_size=__snake_case , per_device_eval_batch_size=__snake_case , predict_with_generate=__snake_case , evaluation_strategy="""steps""" , do_train=__snake_case , do_eval=__snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__magic_name__ : List[str] =SeqaSeqTrainer(
model=__snake_case , args=__snake_case , compute_metrics=_compute_metrics , train_dataset=__snake_case , eval_dataset=__snake_case , tokenizer=__snake_case , )
# start training
trainer.train()
| 21 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """xlm-roberta"""
def __init__( self: Optional[Any] , a: int=3_0522 , a: List[Any]=768 , a: Tuple=12 , a: List[str]=12 , a: Dict=3072 , a: List[str]="gelu" , a: Any=0.1 , a: Optional[Any]=0.1 , a: str=512 , a: Optional[int]=2 , a: int=0.0_2 , a: str=1e-12 , a: str=1 , a: List[Any]=0 , a: Dict=2 , a: Dict="absolute" , a: List[Any]=True , a: str=None , **a: List[Any] , ):
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
__lowerCamelCase : Optional[Any] = vocab_size
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : Dict = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : str = hidden_act
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Optional[int] = attention_probs_dropout_prob
__lowerCamelCase : int = max_position_embeddings
__lowerCamelCase : Any = type_vocab_size
__lowerCamelCase : int = initializer_range
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : List[Any] = position_embedding_type
__lowerCamelCase : List[str] = use_cache
__lowerCamelCase : Optional[int] = classifier_dropout
class A_ ( __UpperCamelCase ):
'''simple docstring'''
@property
def _snake_case ( self: Optional[Any] ):
if self.task == "multiple-choice":
__lowerCamelCase : int = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowerCamelCase : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 669 | 0 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class A ( _a ,_a ,_a ,unittest.TestCase ):
lowercase_ = StableDiffusionControlNetImgaImgPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_a = CLIPTextModel(lowerCAmelCase_ )
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]=0 ) -> List[Any]:
"""simple docstring"""
if str(lowerCAmelCase_ ).startswith('''mps''' ):
_a = torch.manual_seed(lowerCAmelCase_ )
else:
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_a = 2
_a = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , )
_a = floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class A ( _a ,_a ,unittest.TestCase ):
lowercase_ = StableDiffusionControlNetImgaImgPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(lowerCAmelCase_ : List[str] ):
if isinstance(lowerCAmelCase_ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
_a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase_ )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase_ )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_a = CLIPTextModel(lowerCAmelCase_ )
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = MultiControlNetModel([controlneta, controlneta] )
_a = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]=0 ) -> Tuple:
"""simple docstring"""
if str(lowerCAmelCase_ ).startswith('''mps''' ):
_a = torch.manual_seed(lowerCAmelCase_ )
else:
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_a = 2
_a = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , ),
]
_a = floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_a = self.get_dummy_components()
_a = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
_a = 1_0.0
_a = 4
_a = self.get_dummy_inputs(lowerCAmelCase_ )
_a = steps
_a = scale
_a = pipe(**lowerCAmelCase_ )[0]
_a = self.get_dummy_inputs(lowerCAmelCase_ )
_a = steps
_a = scale
_a = pipe(**lowerCAmelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_a = self.get_dummy_inputs(lowerCAmelCase_ )
_a = steps
_a = scale
_a = pipe(**lowerCAmelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_a = self.get_dummy_inputs(lowerCAmelCase_ )
_a = steps
_a = scale
_a = pipe(**lowerCAmelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_a = self.get_dummy_components()
_a = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCAmelCase_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_a = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
_a = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase_ , controlnet=lowerCAmelCase_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a = '''evil space-punk bird'''
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_12, 5_12) )
_a = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_12, 5_12) )
_a = pipe(
lowerCAmelCase_ , lowerCAmelCase_ , control_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
_a = output.images[0]
assert image.shape == (5_12, 5_12, 3)
_a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
| 22 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ConsistencyModelPipeline
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__snake_case = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _snake_case ( self: int , a: str=False ):
if class_cond:
__lowerCamelCase : str = self.dummy_cond_unet
else:
__lowerCamelCase : str = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _snake_case ( self: int , a: List[str] , a: Any=0 ):
if str(a ).startswith('mps' ):
__lowerCamelCase : List[Any] = torch.manual_seed(a )
else:
__lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : Optional[Any] = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : str = ConsistencyModelPipeline(**a )
__lowerCamelCase : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Any = self.get_dummy_inputs(a )
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
__lowerCamelCase : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
__lowerCamelCase : Tuple = 0
__lowerCamelCase : List[str] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
__lowerCamelCase : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Tuple = self.get_dummy_inputs(a )
__lowerCamelCase : str = 1
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Any = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: List[str] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
__lowerCamelCase : List[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_dummy_inputs(a )
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[str] = None
__lowerCamelCase : str = 0
__lowerCamelCase : Tuple = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
__lowerCamelCase : Optional[Any] = torch.manual_seed(a )
__lowerCamelCase : Optional[int] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
__lowerCamelCase : Optional[Any] = latents
return inputs
def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
if type(a ) == str:
__lowerCamelCase : Dict = torch.device(a )
__lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _snake_case ( self: str ):
__lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs()
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_inputs()
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Dict = None
__lowerCamelCase : Union[str, Any] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : int = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def _snake_case ( self: Dict ):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
__lowerCamelCase : str = 1
__lowerCamelCase : Union[str, Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class _a :
"""simple docstring"""
def __init__( self ) -> None:
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = 0
def _UpperCAmelCase ( self ) -> bool:
return self.head == self.tail
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
self.data.append(_UpperCAmelCase )
UpperCamelCase_ = self.tail + 1
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = self.data[self.head]
UpperCamelCase_ = self.head + 1
return ret
def _UpperCAmelCase ( self ) -> int:
return self.tail - self.head
def _UpperCAmelCase ( self ) -> None:
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class _a :
    """An AVL-tree node: payload, left/right children and a cached height.

    NOTE(review): all methods share the mangled name ``_UpperCAmelCase`` (only
    the last definition survives on the class) — presumably getters/setters
    for data/left/right/height originally.  ``__init__`` and the setters bind
    a local instead of ``self`` attributes and read names (``data``, ``node``,
    ``height``) that are not their parameter, so instances are never actually
    initialised or mutated.  Confirm against the original module.
    """

    def __init__( self , _UpperCAmelCase ) -> None:
        # Intended: self.data, self.left, self.right, self.height = data, None, None, 1.
        UpperCamelCase_ = data
        UpperCamelCase_ = None
        UpperCamelCase_ = None
        UpperCamelCase_ = 1

    def _UpperCAmelCase ( self ) -> Any:
        # Payload accessor.
        return self.data

    def _UpperCAmelCase ( self ) -> MyNode | None:
        # Left-child accessor.
        return self.left

    def _UpperCAmelCase ( self ) -> MyNode | None:
        # Right-child accessor.
        return self.right

    def _UpperCAmelCase ( self ) -> int:
        # Cached subtree height accessor.
        return self.height

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
        UpperCamelCase_ = data

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
        UpperCamelCase_ = node

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
        UpperCamelCase_ = node

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
        UpperCamelCase_ = height
def _snake_case (__lowercase):
if node is None:
return 0
return node.get_height()
def _snake_case (__lowercase , __lowercase):
if a > b:
return a
return b
def _snake_case(node):
    """Rotate the subtree rooted at *node* to the right: its left child
    becomes the new subtree root, which is returned.

    Fixed: the parameter was named ``__lowercase`` while the body read an
    undefined name ``node``, and the height helpers (``my_max``/``get_height``)
    do not resolve under those names in this module, so heights are recomputed
    with a local helper.  The printed label is preserved byte-for-byte.
    """
    print('left rotation node:' , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )

    def _height(n):
        # Cached height, treating an empty child as height 0.
        return 0 if n is None else n.get_height()

    node.set_height(max(_height(node.get_right() ) , _height(node.get_left() ) ) + 1 )
    ret.set_height(max(_height(ret.get_right() ) , _height(ret.get_left() ) ) + 1 )
    return ret
def _snake_case(node):
    """Rotate the subtree rooted at *node* to the left: its right child
    becomes the new subtree root, which is returned.

    Fixed: the parameter was named ``__lowercase`` while the body read an
    undefined name ``node``, and the height helpers (``my_max``/``get_height``)
    do not resolve under those names in this module, so heights are recomputed
    with a local helper.  The printed label is preserved byte-for-byte.
    """
    print('right rotation node:' , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )

    def _height(n):
        # Cached height, treating an empty child as height 0.
        return 0 if n is None else n.get_height()

    node.set_height(max(_height(node.get_right() ) , _height(node.get_left() ) ) + 1 )
    ret.set_height(max(_height(ret.get_right() ) , _height(ret.get_left() ) ) + 1 )
    return ret
def _snake_case(node):
    """Left-right double rotation: left-rotate the left child, then
    right-rotate *node*; returns the new subtree root.

    Fixed: the parameter was named ``__lowercase`` while the body read an
    undefined ``node``, and the inner left-rotation was applied to *node*
    itself instead of its left child.
    NOTE(review): ``left_rotation``/``right_rotation`` are not defined under
    those names in this module (the rotation helpers were all renamed
    ``_snake_case``) — confirm against the original module layout.
    """
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def _snake_case(node):
    """Right-left double rotation: right-rotate the right child, then
    left-rotate *node*; returns the new subtree root.

    Fixed: the parameter was named ``__lowercase`` while the body read an
    undefined ``node``, and the inner right-rotation was applied to *node*
    itself instead of its right child.
    NOTE(review): ``left_rotation``/``right_rotation`` are not defined under
    those names in this module (the rotation helpers were all renamed
    ``_snake_case``) — confirm against the original module layout.
    """
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def _snake_case(node, data):
    """Insert *data* into the AVL subtree rooted at *node*, rebalance, and
    return the (possibly new) subtree root.

    Fixed: both parameters were named ``__lowercase`` (a duplicate-argument
    SyntaxError); the standard AVL insertion is restored, recursing through
    this function itself instead of the undefined name ``insert_node``.
    NOTE(review): ``MyNode`` and the rotation helpers are not defined under
    those names in this module (they were renamed) — confirm against the
    original module layout.
    """

    def _height(n):
        # Cached height, treating an empty subtree as 0.
        return 0 if n is None else n.get_height()

    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(_snake_case(node.get_left() , data ) )
        if (
            _height(node.get_left() ) - _height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(_snake_case(node.get_right() , data ) )
        if _height(node.get_right() ) - _height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    node.set_height(max(_height(node.get_right() ) , _height(node.get_left() ) ) + 1 )
    return node
def _snake_case (__lowercase):
while True:
UpperCamelCase_ = root.get_right()
if right_child is None:
break
UpperCamelCase_ = right_child
return root.get_data()
def _snake_case (__lowercase):
while True:
UpperCamelCase_ = root.get_left()
if left_child is None:
break
UpperCamelCase_ = left_child
return root.get_data()
def _snake_case(root, data):
    """Delete *data* from the AVL subtree rooted at *root*, rebalance, and
    return the new subtree root (``None`` if the subtree became empty).

    Fixed: both parameters were named ``__lowercase`` (a duplicate-argument
    SyntaxError); the standard AVL deletion is restored, recursing through
    this function itself instead of the undefined name ``del_node``.
    NOTE(review): ``get_left_most`` and the rotation helpers are not defined
    under those names in this module (they were renamed ``_snake_case``) —
    confirm against the original module layout.
    """

    def _height(n):
        # Cached height, treating an empty subtree as 0.
        return 0 if n is None else n.get_height()

    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: replace payload with the in-order successor and
            # delete that successor from the right subtree.
            successor_data = get_left_most(right_child )
            root.set_data(successor_data )
            root.set_right(_snake_case(right_child , successor_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data' )
            return root
        else:
            root.set_left(_snake_case(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(_snake_case(right_child , data ) )
    # Rebalance if the deletion unbalanced this node (height diff of 2).
    if _height(right_child ) - _height(left_child ) == 2:
        assert right_child is not None
        if _height(right_child.get_right() ) > _height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif _height(right_child ) - _height(left_child ) == -2:
        assert left_child is not None
        if _height(left_child.get_left() ) > _height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    root.set_height(max(_height(root.get_right() ) , _height(root.get_left() ) ) + 1 )
    return root
class _a :
    """AVL tree facade: insert/delete with rebalancing plus a level-order
    ``__str__`` rendering.

    NOTE(review): names are mangled throughout — ``insert_node``/``del_node``/
    ``get_height``/``MyQueue`` are not defined under those names in this
    module, the insert/delete results are bound to a discarded local instead
    of ``self.root``, the two mutator methods share the name
    ``_UpperCAmelCase`` (the second shadows the first), and ``__str__`` reads
    locals (``q``, ``layer``, ``output``, ``cnt``, ``node``, ``space``) that
    are never bound.  Confirm against the original module.
    """

    def __init__( self ) -> None:
        # Intended: self.root = None (empty tree).
        UpperCamelCase_ = None

    def _UpperCAmelCase ( self ) -> int:
        # Height of the whole tree.
        return get_height(self.root )

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
        # Insert a value (the new root is lost to a discarded local — see NOTE).
        print('insert:' + str(_UpperCAmelCase ) )
        UpperCamelCase_ = insert_node(self.root , _UpperCAmelCase )

    def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
        # Delete a value (the new root is lost to a discarded local — see NOTE).
        print('delete:' + str(_UpperCAmelCase ) )
        if self.root is None:
            print('Tree is empty!' )
            return
        UpperCamelCase_ = del_node(self.root , _UpperCAmelCase )

    def __str__( self , ) -> str:  # a level traversal gives a more intuitive look at the tree
        # Breadth-first walk, padding each level with spaces so the rendering
        # roughly lines up as a binary-tree diagram; absent children print "*".
        UpperCamelCase_ = ''
        UpperCamelCase_ = MyQueue()
        q.push(self.root )
        UpperCamelCase_ = self.get_height()
        if layer == 0:
            return output
        UpperCamelCase_ = 0
        while not q.is_empty():
            UpperCamelCase_ = q.pop()
            UpperCamelCase_ = ' ' * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(_UpperCAmelCase )
                q.push(_UpperCAmelCase )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            UpperCamelCase_ = cnt + 1
            # A full level holds 2^layer - 1 total nodes so far; when reached,
            # drop one layer and start a new output line.
            for i in range(100 ):
                if cnt == math.pow(2 , _UpperCAmelCase ) - 1:
                    UpperCamelCase_ = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _snake_case ():
import doctest
doctest.testmod()
if __name__ == "__main__":
    # NOTE(review): this smoke test is mangled — `_test` and `AVLtree` are not
    # defined under those names here, both bindings reuse `snake_case__`, and
    # the loops read undefined `t`/`lst` (presumably an AVLtree instance and a
    # shuffled list(range(10)) originally).  `Optional`/`Union` are also not
    # imported.  Confirm against the original script.
    _test()
    snake_case__ : Optional[Any] = AVLtree()
    snake_case__ : Union[str, Any] = list(range(1_0))
    random.shuffle(lst)
    # Insert all values in random order, printing the tree after each step.
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    # Delete all values in a fresh random order, printing after each step.
    for i in lst:
        t.del_node(i)
        print(str(t))
| 23 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)

# NOTE(review): both module constants are bound to the same mangled name
# `lowercase_`, so the logger above is immediately shadowed by the checkpoint
# map below — presumably `logger` and the pretrained-config archive map
# originally.
lowercase_ = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( PretrainedConfig ):
    """Configuration for the TrOCR text decoder.

    Stores decoder hyper-parameters (vocabulary size, hidden size, layer
    counts, dropouts, embedding options); instantiating with no arguments
    yields the defaults of ``microsoft/trocr-base-handwritten``.

    Fixed: every ``__init__`` parameter was named ``a`` — a duplicate-argument
    SyntaxError.  The upstream TrOCR parameter names, whose defaults match the
    corrupted signature one-for-one, are restored; the three class attributes
    (which all shadowed one another under ``__snake_case``) are restored to
    the ``PretrainedConfig`` contract names; the base class is the imported
    ``PretrainedConfig`` (the previous base name was undefined here).
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.0_2,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 0 |
'''simple docstring'''
from collections import deque
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
__snake_case = len(_lowerCamelCase )
__snake_case = deque()
__snake_case = [False for _ in range(_lowerCamelCase )]
__snake_case = [-1 for _ in range(_lowerCamelCase )]
__snake_case = index_of[:]
def strong_connect(_lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
__snake_case = index # the number when this node is seen
__snake_case = index # lowest rank node reachable from here
index += 1
stack.append(_lowerCamelCase )
__snake_case = True
for w in g[v]:
if index_of[w] == -1:
__snake_case = strong_connect(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__snake_case = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__snake_case = []
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
while w != v:
__snake_case = stack.pop()
__snake_case = False
component.append(_lowerCamelCase )
components.append(_lowerCamelCase )
return index
__snake_case = []
for v in range(_lowerCamelCase ):
if index_of[v] == -1:
strong_connect(_lowerCamelCase , 0 , _lowerCamelCase )
return components
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = [[] for _ in range(_lowerCamelCase )]
for u, v in edges:
g[u].append(_lowerCamelCase )
return g
if __name__ == "__main__":
    # Test
    # NOTE(review): this smoke test is mangled — every binding reuses the name
    # `UpperCAmelCase_`, and `create_graph`/`tarjan`/`n_vertices`/`edges`/
    # `source`/`target`/`g` are not defined under those names in this module
    # (nor are the `List`/`Dict`/`Tuple` annotations imported).  Presumably it
    # built a 7-vertex graph from the source/target lists and asserted the SCC
    # decomposition.  Confirm against the original script.
    UpperCAmelCase_ : List[str] = 7
    UpperCAmelCase_ : int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    UpperCAmelCase_ : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    UpperCAmelCase_ : List[str] = [(u, v) for u, v in zip(source, target)]
    UpperCAmelCase_ : Tuple = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 24 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( ProcessorMixin ):
    """Processor combining a CLIP image processor and an XLM-Roberta
    tokenizer into a single callable (AltCLIP-style).

    Fixed: both ``__init__`` and ``__call__`` used the name ``a`` for every
    parameter (a duplicate-argument SyntaxError), and results were bound to
    discarded mangled locals while later lines read the original names.  The
    upstream processor structure — whose strings match the corrupted copy —
    is restored; public method names follow the upstream processor API, and
    the base class is the imported ``ProcessorMixin`` (the previous base name
    was undefined here).
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backwards compatibility for the deprecated argument name.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize *text* and/or preprocess *images*.

        With both inputs, the image pixel values are merged into the text
        encoding; with only one, that modality's encoding is returned.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        # Forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        # Forwarded to the tokenizer.
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Union of both components' input names, order-preserving and deduplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowerCamelCase__ (result , args ):
    """Compute WER/CER for *result* (a dataset with ``target``/``prediction``
    columns) and write them to ``<dataset_id>_eval_results.txt``; optionally
    also log every prediction/target pair.

    Fixed: both parameters were named ``_a`` (a duplicate-argument
    SyntaxError), as were the nested ``write_to_file`` parameters; the
    mangled local bindings are restored to the names the body reads.
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"] , predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"] , predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt" , "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file , "w") as p, open(target_file , "w") as t:
            # mapping function to write output
            def write_to_file(batch , i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file , with_indices=True)
def lowerCamelCase__ (_a):
    """Lower-case *_a*, strip the punctuation characters ignored during
    training, and normalise the listed token sequences to single spaces.

    Fixed: the body read an undefined name ``text`` while the parameter was
    ``_a`` (NameError on every call).
    NOTE(review): the upstream script's ignore list used triple/double-space
    entries (``"   "``, ``"  "``); the entries here are single spaces, which
    are no-ops — confirm against the original before relying on whitespace
    collapsing.
    """
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , "" , _a.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", " ", " "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def lowerCamelCase__ ( _a):
    """Evaluation driver: load a dataset, resample audio to the model's rate,
    run an ASR pipeline over every example, then log WER/CER.

    NOTE(review): mangled — every result is bound to the same local
    `SCREAMING_SNAKE_CASE` while later lines read the original names (`args`,
    `dataset`, `feature_extractor`, `asr`, `batch`, `prediction`,
    `normalize_text`, `log_results`); it cannot run as written.  Confirm
    against the original eval script.
    """
    # load dataset
    SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_a)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(args.model_id)
    SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor.sampling_rate

    # resample audio
    SCREAMING_SNAKE_CASE : List[str] = dataset.cast_column("audio" , Audio(sampling_rate=_a))

    # load eval pipeline
    if args.device is None:
        # Default device: first GPU when available, else CPU (-1).
        SCREAMING_SNAKE_CASE : Tuple = 0 if torch.cuda.is_available() else -1
    SCREAMING_SNAKE_CASE : int = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device)

    # map function to decode audio
    def map_to_pred(_a):
        SCREAMING_SNAKE_CASE : Optional[int] = asr(
            batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s)

        SCREAMING_SNAKE_CASE : Optional[Any] = prediction["text"]
        SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    SCREAMING_SNAKE_CASE : Optional[Any] = dataset.map(_a , remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(_a , _a)
if __name__ == "__main__":
    # NOTE(review): `a_` is bound twice (the parser, then the parsed args),
    # and the `parser.add_argument(...)` calls read a name that was never
    # bound under `parser` — mangled; confirm against the original script.
    a_ = argparse.ArgumentParser()

    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    a_ = parser.parse_args()
main(args) | 25 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make RNG-dependent diffusion ops deterministic so the slice comparisons in
# the tests below are reproducible across runs.
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast smoke tests for ``LDMPipeline`` built from tiny dummy models.

    NOTE(review): mangled — the three properties and the test method all share
    the name ``_snake_case`` (each shadows the previous), every value is bound
    to `__lowerCamelCase` while later lines read the original names (`model`,
    `ldm`, `image`, `image_from_tuple`, `expected_slice`, `tolerance`), and
    `a` is read though never bound.  These tests cannot run as written;
    confirm against the original test module.
    """

    @property
    def _snake_case ( self: int ):
        # Tiny UNet for fast testing.
        torch.manual_seed(0 )
        __lowerCamelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def _snake_case ( self: str ):
        # Tiny VQ-VAE for fast testing.
        torch.manual_seed(0 )
        __lowerCamelCase : Any = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def _snake_case ( self: Dict ):
        # Tiny CLIP text encoder config for fast testing.
        torch.manual_seed(0 )
        __lowerCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(a )

    def _snake_case ( self: List[str] ):
        # End-to-end: run the pipeline with both dict and tuple returns and
        # compare a fixed output slice against reference values.
        __lowerCamelCase : Union[str, Any] = self.dummy_uncond_unet
        __lowerCamelCase : List[str] = DDIMScheduler()
        __lowerCamelCase : str = self.dummy_vq_model

        __lowerCamelCase : Optional[int] = LDMPipeline(unet=a , vqvae=a , scheduler=a )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )

        __lowerCamelCase : Optional[int] = torch.manual_seed(0 )

        __lowerCamelCase : Any = ldm(generator=a , num_inference_steps=2 , output_type='numpy' ).images

        __lowerCamelCase : Tuple = torch.manual_seed(0 )

        __lowerCamelCase : Dict = ldm(generator=a , num_inference_steps=2 , output_type='numpy' , return_dict=a )[0]

        __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        # Looser tolerance on Apple MPS, where float behaviour differs slightly.
        __lowerCamelCase : str = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: full ``CompVis/ldm-celebahq-256`` pipeline run,
    comparing a fixed slice of the generated image against reference values.

    NOTE(review): mangled — values are bound to `__lowerCamelCase` while later
    lines read `ldm`/`image`/`expected_slice`/`tolerance`, and `a` is read
    though never bound; this test cannot run as written.
    """

    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )

        __lowerCamelCase : Dict = torch.manual_seed(0 )

        __lowerCamelCase : int = ldm(generator=a , num_inference_steps=5 , output_type='numpy' ).images

        __lowerCamelCase : List[str] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        __lowerCamelCase : List[Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        # Looser tolerance on Apple MPS, where float behaviour differs slightly.
        __lowerCamelCase : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> float:
"""simple docstring"""
__snake_case : str = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_lowerCamelCase )] )
__snake_case : Optional[Any] = np.array(_lowerCamelCase )
__snake_case : List[str] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _lowerCamelCase ) ) , x.transpose() ) , _lowerCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _a (train_user , train_match , test_match ) -> float:
    """Predict the next user count with a seasonal ARIMA model (weekly
    seasonality), using the match counts as an exogenous regressor.

    Fixed: all three parameters shared the name ``_lowerCamelCase`` (a
    duplicate-argument SyntaxError); the upstream names are restored, and
    ``fit`` is called with ``disp=False`` / ``predict`` with
    ``len(test_match)`` as in the upstream script (those arguments were
    mangled).
    """
    # Suppress the statsmodels convergence warnings in the upstream script;
    # order/seasonal_order were tuned for this dataset.
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=600 , method="nm" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def _a (x_train , x_test , train_user ) -> float:
    """Predict the next user count with an RBF-kernel support-vector
    regressor trained on (date, match) feature rows.

    Fixed: all three parameters shared the name ``_lowerCamelCase`` (a
    duplicate-argument SyntaxError); the upstream names are restored so the
    regressor is fitted on the training features/targets and evaluated on
    the held-out feature row.
    """
    regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def _a ( _lowerCamelCase ) -> float:
"""simple docstring"""
train_user.sort()
__snake_case : List[str] = np.percentile(_lowerCamelCase , 25 )
__snake_case : Optional[int] = np.percentile(_lowerCamelCase , 75 )
__snake_case : List[Any] = qa - qa
__snake_case : List[str] = qa - (iqr * 0.1)
return low_lim
def _a ( _lowerCamelCase , _lowerCamelCase ) -> bool:
"""simple docstring"""
__snake_case : Tuple = 0
__snake_case : str = 0
for i in list_vote:
if i > actual_result:
__snake_case : str = not_safe + 1
else:
if abs(abs(_lowerCamelCase ) - abs(_lowerCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
    # NOTE(review): this driver is mangled — every binding reuses the name
    # `__UpperCamelCase` while later lines read the original names
    # (`data_input`, `data_input_df`, `normalize_df`, `x`, `total_date`,
    # `total_user`, `total_match`, `trn_*`, `tst_*`, `res_vote`, `not_str`);
    # it cannot run as written.  Confirm against the original script.
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    __UpperCamelCase = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    __UpperCamelCase = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    __UpperCamelCase = Normalizer().fit_transform(data_input_df.values)
    # split data
    __UpperCamelCase = normalize_df[:, 2].tolist()
    __UpperCamelCase = normalize_df[:, 0].tolist()
    __UpperCamelCase = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    __UpperCamelCase = normalize_df[:, [1, 2]].tolist()
    __UpperCamelCase = x[: len(x) - 1]
    __UpperCamelCase = x[len(x) - 1 :]
    # for linear regression & sarimax
    __UpperCamelCase = total_date[: len(total_date) - 1]
    __UpperCamelCase = total_user[: len(total_user) - 1]
    __UpperCamelCase = total_match[: len(total_match) - 1]
    __UpperCamelCase = total_date[len(total_date) - 1 :]
    __UpperCamelCase = total_user[len(total_user) - 1 :]
    __UpperCamelCase = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    __UpperCamelCase = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    __UpperCamelCase = "" if data_safety_checker(res_vote, tst_user) else "not "
    # NOTE(review): the string below is missing its `f` prefix, so the
    # placeholder `{not_str}` is printed literally instead of interpolated.
    print("Today's data is {not_str}safe.")
| 26 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# NOTE(review): both constants are bound to the same mangled name `lowercase_`
# (the usage string is immediately shadowed), and `choice` — shuffled on the
# next line — is not defined under that name; presumably these were
# `usage_doc` and `choice` (a ~9%-alive weighted pool) originally.
lowercase_ = 'Usage of script: script_name <size_of_canvas:int>'

lowercase_ = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def UpperCamelCase__ (SCREAMING_SNAKE_CASE__ ):
    """Return a size×size grid of dead cells (``False``).

    Fixed: the comprehension's result was bound to a discarded mangled local
    while the function returned an undefined name ``canvas``.
    """
    canvas = [
        [False for _ in range(SCREAMING_SNAKE_CASE__ )]
        for _ in range(SCREAMING_SNAKE_CASE__ )
    ]
    return canvas
def UpperCamelCase__ (SCREAMING_SNAKE_CASE__ ):
    """Randomise the canvas in place: each cell becomes a random bool.

    Fixed: the random bit was assigned to a discarded mangled local instead
    of ``canvas[i][j]`` (so the board was never actually seeded), and the
    inner loop enumerated the whole canvas instead of the current row.
    """
    for i, row in enumerate(SCREAMING_SNAKE_CASE__ ):
        for j, _ in enumerate(row ):
            SCREAMING_SNAKE_CASE__[i][j] = bool(random.getrandbits(1 ) )
def UpperCamelCase__ (SCREAMING_SNAKE_CASE__ ):
    """Advance the Game of Life board one generation and return the new
    board as a list of lists of bools.

    Fixed: each new cell state was bound to a discarded mangled local instead
    of ``next_gen_canvas[r][c]``, and the loops enumerated the raw argument
    instead of the NumPy copy.
    NOTE(review): ``create_canvas`` and ``__judge_point`` are not defined
    under those names in this module (both helpers were renamed
    ``UpperCamelCase__``) — confirm against the original module layout.
    """
    current_canvas = np.array(SCREAMING_SNAKE_CASE__ )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            # Judge each cell from its 3x3 neighbourhood (NumPy slicing clips
            # at the borders, so edge cells see a smaller window).
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def UpperCamelCase__ (pt , neighbours ):
    """Apply Conway's rules to one cell: *pt* is the current state and
    *neighbours* is its (up to) 3x3 neighbourhood including the cell itself;
    returns the cell's next state.

    Fixed: both parameters were named ``SCREAMING_SNAKE_CASE__`` (a
    duplicate-argument SyntaxError) and the counters/state were bound to
    discarded mangled locals.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    # NOTE(review): mangled — every binding reuses `lowercase_` while later
    # lines read `usage_doc`, `canvas_size`, `c`, `fig`, `ax`, `cmap`, and the
    # helpers `create_canvas`/`seed`/`run` are not defined under those names
    # in this module; the animation loop cannot run as written.  Confirm
    # against the original script.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    lowercase_ = int(sys.argv[1])
    # main working structure of this module.
    lowercase_ = create_canvas(canvas_size)
    seed(c)
    lowercase_ ,lowercase_ = plt.subplots()
    fig.show()
    lowercase_ = ListedColormap(['w', 'k'])
    try:
        # Animate: recompute the next generation and redraw until Ctrl-C.
        while True:
            lowercase_ = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants are bound to the same mangled name `__A`, so
# the logger is immediately shadowed by the checkpoint map — presumably
# `logger` and the pretrained-config archive map originally.
__A : List[Any] = logging.get_logger(__name__)

__A : Union[str, Any] = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowerCamelCase( PretrainedConfig ):
    """Configuration for the NLLB-MoE (mixture-of-experts translation) model.

    Fixed: every ``__init__`` parameter shared the name ``snake_case_`` — a
    duplicate-argument SyntaxError.  The upstream NLLB-MoE parameter names,
    whose defaults match the corrupted signature one-for-one, are restored;
    the three class attributes (which all shadowed one another under
    ``__magic_name__``) are restored to the ``PretrainedConfig`` contract
    names; the base class is the imported ``PretrainedConfig`` (the previous
    base name was undefined in this module).
    """

    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=12_8112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing configuration.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 27 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( __UpperCamelCase ):
    """Decoding granularities for MGP-STR outputs: character, BPE, wordpiece.

    NOTE(review): all three members are bound to the same mangled name
    ``__snake_case`` — only the last ("wp") survives on the class; presumably
    they were ``CHARACTER``/``BPE``/``WORDPIECE`` originally, and the base
    name was the imported ``ExplicitEnum``.  Confirm against the original.
    """

    __snake_case = """char"""
    __snake_case = """bpe"""
    __snake_case = """wp"""
# Tuple of all supported decode granularities.
# NOTE(review): `DecodeType` is not defined under that name in this module
# (the enum class above was renamed `A_` with mangled members) — this line
# cannot evaluate as written; confirm against the original module.
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( ProcessorMixin ):
    """Processor for MGP-STR scene-text recognition: wraps a ViT image
    processor plus three tokenizers (character, GPT-2 BPE, BERT wordpiece)
    and fuses their decodings by confidence.

    Fixed: ``__init__`` and ``__call__`` used the name ``a`` for every
    parameter (a duplicate-argument SyntaxError), results were bound to
    discarded mangled locals, and the decode helpers all shared the name
    ``_snake_case`` (shadowing one another).  The upstream processor
    structure — whose strings and constants match the corrupted copy — is
    restored; public method names follow the upstream API, and the base
    class is the imported ``ProcessorMixin``.
    NOTE(review): ``DecodeType`` is not defined under that name in this
    module (the enum class was renamed) — confirm the module's names
    against the upstream layout.
    """

    attributes = ["""image_processor""", """char_tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    char_tokenizer_class = """MgpstrTokenizer"""

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backwards compatibility for the deprecated argument name.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )

        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Preprocess *images* and/or tokenize *text* with the character
        tokenizer; with both, the token ids are attached as ``labels``."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )

        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode( self , sequences ):
        """Decode (char, bpe, wp) logits and keep, per sample, the decoding
        with the highest confidence score."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )

        char_strs, char_scores = self._decode_helper(char_preds , 'char' )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , 'bpe' )
        wp_strs, wp_scores = self._decode_helper(wp_preds , 'wp' )

        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )

        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper( self , pred_logits , format ):
        # Greedy-decode logits with the tokenizer matching *format*, truncate
        # at the matching EOS marker, and compute a cumulative-probability
        # confidence per sample.
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'Format {format} is not supported.' )

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )

        return dec_strs, conf_scores

    def char_decode( self , sequences ):
        # The character tokenizer emits space-separated characters; strip them.
        decode_strs = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs

    def bpe_decode( self , sequences ):
        return self.bpe_tokenizer.batch_decode(sequences )

    def wp_decode( self , sequences ):
        # Wordpiece decoding also yields space-separated pieces; strip spaces.
        decode_strs = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
| 669 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _a ( SCREAMING_SNAKE_CASE ):
    """Abstract base class for a CLI sub-command.

    Concrete subclasses must implement both abstract hooks below. The static
    hook presumably receives an argparse (sub-)parser — confirm at call sites.
    """

    @staticmethod
    @abstractmethod
    def UpperCamelCase_ ( A ):
        """Register this command with the given parser (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def UpperCamelCase_ ( self ):
        """Execute the command (abstract)."""
        raise NotImplementedError()
| 28 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase__ ( tokenizer_name , checkpoint_name , dump_path , force_download ):
    """Convert slow tokenizer checkpoint(s) into fast `tokenizer.json` files.

    tokenizer_name: one of TOKENIZER_CLASSES' keys, or None to convert all of them.
    checkpoint_name: a single checkpoint to convert, or None for every canonical one.
    dump_path: directory the fast tokenizer files are written to.
    force_download: re-download checkpoints even if cached.
    Raises ValueError for an unrecognized tokenizer name.

    NOTE(review): the original declared all four parameters with the same name
    (a SyntaxError) and assigned every intermediate to a throwaway name; the
    identifiers the body actually reads are restored here.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        # `tokenizer_class` is a class object, so use __name__ directly:
        # `__class__.__name__` would always log 'type'.
        logger.info(f'For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            # legacy_format=False keeps only the fast (tokenizer.json) serialization.
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    # Drop every slow-format artifact; `os.remove` must get the file
                    # being iterated, not a function argument as before.
                    os.remove(file_name )
                    logger.info(f'=> removing {file_name}' )


# Name the __main__ block at the bottom of this file refers to.
convert_slow_checkpoint_to_fast = UpperCamelCase__
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to throwaway
    # names and then read the undefined identifiers `parser`/`args`, and called
    # the undefined `convert_slow_checkpoint_to_fast`; all three fixed here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    # Call the converter defined above by its actual name.
    UpperCamelCase__(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCamelCase :
    """Builds tiny VideoMAE configs and inputs for the model tests below.

    NOTE(review): the original `__init__` declared every parameter with the same
    name (a SyntaxError) and dropped all `self.*` assignments; the methods were
    all named `UpperCAmelCase__` (shadowing each other) while the sibling test
    class calls `prepare_config_and_inputs` etc. — the names read at the call
    sites are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.0_2,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )

    def prepare_config_and_inputs( self ):
        """Return (config, pixel_values, labels) for one tiny batch."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        """Build a small VideoMAEConfig from the tester's hyperparameters."""
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels ):
        """Forward pass of the base model; check the hidden-state shape."""
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        """Forward pass of the pre-training model; check the reconstruction logits shape."""
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict


# The test class below instantiates this helper as `VideoMAEModelTester`.
VideoMAEModelTester = __lowerCamelCase
@require_torch
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Common model tests for VideoMAE (model classes, pipeline mapping, attention
    and hidden-state output checks).

    NOTE(review): every test method here is named `UpperCAmelCase__`, so later
    definitions shadow earlier ones on the class and only the last survives for
    collection. Many bodies also bind results to the throwaway `lowerCamelCase_`
    and then read identifiers (`model_class`, `inputs_dict`, `model`, `outputs`,
    `attentions`, ...) that are never defined — this class cannot run as written.
    Flagged, not fixed, to keep this change documentation-only.
    """

    # Tuples/dicts of model classes under test; base-class attribute names are
    # mangled to `a__` — presumably all_model_classes / pipeline_model_mapping
    # and the usual boolean switches (TODO confirm against the mixin).
    a__: Optional[Any] = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    a__: List[Any] = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    a__: Tuple = False
    a__: List[str] = False
    a__: Optional[Any] = False
    a__: List[str] = False

    def UpperCAmelCase__ ( self ):
        # setUp: build the model tester and the config tester.
        lowerCamelCase_ = VideoMAEModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )

    def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ):
        # _prepare_for_class: adapt the shared inputs to one model class.
        lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            lowerCamelCase_ = torch.ones((self.model_tester.num_masks,) )
            lowerCamelCase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            lowerCamelCase_ = mask.expand(self.model_tester.batch_size , -1 ).bool()
            lowerCamelCase_ = bool_masked_pos.to(UpperCAmelCase )
        if return_labels:
            if model_class in [
                *get_values(UpperCAmelCase ),
            ]:
                lowerCamelCase_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
        return inputs_dict

    def UpperCAmelCase__ ( self ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
    def UpperCAmelCase__ ( self ):
        pass

    def UpperCAmelCase__ ( self ):
        # Input embeddings must be an nn.Module; output embeddings a Linear or None.
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )

    def UpperCAmelCase__ ( self ):
        # forward() must take `pixel_values` as its first argument.
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCAmelCase )
            lowerCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ = [*signature.parameters.keys()]
            lowerCamelCase_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase )

    def UpperCAmelCase__ ( self ):
        # Base-model forward shape check (delegated to the model tester).
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )

    def UpperCAmelCase__ ( self ):
        # Pre-training forward shape check (delegated to the model tester).
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase )

    @slow
    def UpperCAmelCase__ ( self ):
        # Smoke test: the first published checkpoint must load.
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = VideoMAEModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )

    def UpperCAmelCase__ ( self ):
        # Attention outputs: count, shape, and presence when requested via kwargs
        # or via config. For pre-training only visible (unmasked) patches attend.
        if not self.has_attentions:
            pass
        else:
            lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
            lowerCamelCase_ = True
            for model_class in self.all_model_classes:
                lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
                lowerCamelCase_ = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                lowerCamelCase_ = True
                lowerCamelCase_ = False
                lowerCamelCase_ = True
                lowerCamelCase_ = model_class(UpperCAmelCase )
                model.to(UpperCAmelCase )
                model.eval()
                with torch.no_grad():
                    lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
                lowerCamelCase_ = outputs.attentions
                self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                lowerCamelCase_ = True
                lowerCamelCase_ = model_class(UpperCAmelCase )
                model.to(UpperCAmelCase )
                model.eval()
                with torch.no_grad():
                    lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
                lowerCamelCase_ = outputs.attentions
                self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                lowerCamelCase_ = len(UpperCAmelCase )
                # Check attention is always last and order is fine
                lowerCamelCase_ = True
                lowerCamelCase_ = True
                lowerCamelCase_ = model_class(UpperCAmelCase )
                model.to(UpperCAmelCase )
                model.eval()
                with torch.no_grad():
                    lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
                self.assertEqual(out_len + 1 , len(UpperCAmelCase ) )
                lowerCamelCase_ = outputs.attentions
                self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    def UpperCAmelCase__ ( self ):
        # Hidden-state outputs: num_hidden_layers + 1 entries of shape
        # (seq_length, hidden_size), requested via kwargs then via config.
        def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
            lowerCamelCase_ = model_class(UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            lowerCamelCase_ = outputs.hidden_states
            lowerCamelCase_ = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
            lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
            lowerCamelCase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def UpperCAmelCase__ ( self ):
        pass
def lowercase ( ):
    """Download the spaghetti test video from the Hub and return it as a list of frames.

    NOTE(review): the original bound the downloaded path to a throwaway name and
    then called `np.load`/`list` on the undefined `lowerCAmelCase__` (NameError);
    the chain is restored here.
    """
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' )
    video = np.load(file )
    return list(video )


# The integration tests below call this helper as `prepare_video`.
prepare_video = lowercase
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
    """Slow integration tests against the published VideoMAE checkpoints.

    NOTE(review): both test methods are named `UpperCAmelCase__`, so the second
    shadows the first; results are bound to the throwaway `lowerCamelCase_` and
    the bodies then read identifiers (`model`, `image_processor`, `outputs`,
    `inputs`, ...) that are never defined. Flagged, not fixed, to keep this
    change documentation-only.
    """

    @cached_property
    def UpperCAmelCase__ ( self ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCAmelCase__ ( self ):
        # Video classification: 400 Kinetics classes, first three logits pinned.
        lowerCamelCase_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
            UpperCAmelCase )
        lowerCamelCase_ = self.default_image_processor
        lowerCamelCase_ = prepare_video()
        lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''pt''' ).to(UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCAmelCase )
        # verify the logits
        lowerCamelCase_ = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase )
        lowerCamelCase_ = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )

    @slow
    def UpperCAmelCase__ ( self ):
        # Pre-training: reconstruction logits and loss, with and without norm_pix_loss.
        lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(UpperCAmelCase )
        lowerCamelCase_ = self.default_image_processor
        lowerCamelCase_ = prepare_video()
        lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''pt''' ).to(UpperCAmelCase )
        # add boolean mask, indicating which patches to mask
        lowerCamelCase_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
        lowerCamelCase_ = torch.load(UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCAmelCase )
        # verify the logits
        lowerCamelCase_ = torch.Size([1, 1408, 1536] )
        lowerCamelCase_ = torch.tensor(
            [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=UpperCAmelCase )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        lowerCamelCase_ = torch.tensor([0.5_1_4_2] , device=UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.loss , UpperCAmelCase , atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=UpperCAmelCase ).to(
            UpperCAmelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCAmelCase )
        lowerCamelCase_ = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.loss , UpperCAmelCase , atol=1e-4 ) )
| 29 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for Pegasus (slow and fast implementations).

    NOTE(review): every test method is named `_snake_case`, so later definitions
    shadow earlier ones and only the last is collected; several bodies bind to
    the throwaway `__lowerCamelCase` and then read identifiers (`tokenizer`,
    `vocab_keys`, `rust_tokenizer`, `batch`, `targets`, ...) that are never
    defined, and `PegasusTokenizer(a )` references an undefined `a` (presumably
    the sample-vocab fixture path). Flagged, not fixed: documentation-only change.
    """

    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True

    def _snake_case ( self: List[str] ):
        # setUp: build a tokenizer from the fixture and save it to tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : List[str] = PegasusTokenizer(a )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _snake_case ( self: List[Any] ):
        # _large_tokenizer: the published google/pegasus-large checkpoint.
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )

    def _snake_case ( self: Tuple , **a: List[Any] ):
        # get_tokenizer: reload from the tmp directory written in setUp.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )

    def _snake_case ( self: List[Any] , a: int ):
        # get_input_output_texts stub used by the common tokenizer tests.
        return ("This is a test", "This is a test")

    def _snake_case ( self: Any ):
        # </s> must map to id 1 and back.
        __lowerCamelCase : Dict = '</s>'
        __lowerCamelCase : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )

    def _snake_case ( self: Optional[Any] ):
        # Vocab layout: <pad>, </s> first; 'v' last; 1103 entries in the fixture.
        __lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(a ) , 1103 )

    def _snake_case ( self: Tuple ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def _snake_case ( self: Dict ):
        # Python and Rust tokenizers must produce identical ids for mask tokens.
        __lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        __lowerCamelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )

    def _snake_case ( self: int ):
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        __lowerCamelCase : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        __lowerCamelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )

    def _snake_case ( self: Dict ):
        # Invariants of the large checkpoint: sizes, special-token ids, offset.
        __lowerCamelCase : Any = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        __lowerCamelCase : int = 'To ensure a smooth flow of bank resolutions.'
        __lowerCamelCase : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : List[str] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def _snake_case ( self: str ):
        # Batch shapes after padding/truncation to the model max length (1024).
        __lowerCamelCase : List[str] = ['This is going to be way too long.' * 150, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : List[str] = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2  # input_ids, attention_mask.

    @slow
    def _snake_case ( self: List[str] ):
        # Full integration check against a pinned bigbird-pegasus revision.
        # fmt: off
        __lowerCamelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for BigBird-Pegasus (offset 0, [MASK] as mask token).

    NOTE(review): as in the class above, every test method is named
    `_snake_case` (later definitions shadow earlier ones), and several bodies
    read identifiers (`tokenizer`, `rust_tokenizer`, `batch`, `targets`, ...)
    that the mangled `__lowerCamelCase` assignments never define; `PegasusTokenizer(a , ...)`
    in setUp references an undefined `a`. Flagged, not fixed: documentation-only change.
    """

    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True

    def _snake_case ( self: str ):
        # setUp: fixture tokenizer with offset=0 and [MASK] mask token.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : str = PegasusTokenizer(a , offset=0 , mask_token_sent=a , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _snake_case ( self: List[str] ):
        # _large_tokenizer: the published bigbird-pegasus-large-arxiv checkpoint.
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def _snake_case ( self: Union[str, Any] , **a: Dict ):
        # get_tokenizer: reload from the tmp directory written in setUp.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )

    def _snake_case ( self: List[str] , a: Any ):
        # get_input_output_texts stub used by the common tokenizer tests.
        return ("This is a test", "This is a test")

    def _snake_case ( self: Any ):
        # Python and Rust tokenizers must agree on [MASK]/unk handling.
        __lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        __lowerCamelCase : int = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )

    @require_torch
    def _snake_case ( self: Union[str, Any] ):
        # Batch shapes after padding/truncation to the model max length (4096).
        __lowerCamelCase : Union[str, Any] = ['This is going to be way too long.' * 1000, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : str = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : Any = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2  # input_ids, attention_mask.

    def _snake_case ( self: Any ):
        # Pinned token ids for a reference sentence (parity with the TF original).
        __lowerCamelCase : int = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        __lowerCamelCase : Dict = self._large_tokenizer(a ).input_ids
        self.assertListEqual(
            a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 669 | 0 |
from __future__ import annotations
from math import pow, sqrt
def lowerCamelCase__ ( resistance , reactance , impedance ):
    '''
    Compute the missing quantity of a series AC circuit from the other two,
    using the impedance triangle Z^2 = R^2 + X^2.

    Exactly one of the three arguments must be 0 (the unknown); the result is a
    one-entry dict naming the computed quantity.

    >>> lowerCamelCase__(3, 4, 0)
    {'impedance': 5.0}
    >>> lowerCamelCase__(0, 4, 5)
    {'resistance': 3.0}
    >>> lowerCamelCase__(3, 0, 5)
    {'reactance': 4.0}

    NOTE(review): the original declared all three parameters as `_lowercase`
    (a SyntaxError) while the body read `resistance`/`reactance`/`impedance`;
    the parameter names and the pow() arguments are restored here.
    '''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    # (A stray "| 30 |" dataset artifact fused onto the testmod() line made
    # this block a SyntaxError; removed.)
    import doctest

    doctest.testmod()
def UpperCamelCase__ ( density , bulk_modulus ):
    """Return the speed of sound in a fluid, sqrt(bulk_modulus / density).

    density: fluid density (must be > 0); units presumably kg/m^3 — confirm.
    bulk_modulus: fluid bulk modulus (must be > 0); units presumably Pa — confirm.
    Raises ValueError for non-positive inputs.

    NOTE(review): the original declared both parameters as `SCREAMING_SNAKE_CASE__`
    (a SyntaxError) while the body read `density`/`bulk_modulus`; restored here.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 669 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCamelCase_ :
    '''Mixin checking that a feature extractor serialises and round-trips correctly.

    Subclasses must provide `feature_extraction_class` and `feat_extract_dict`
    (read below but not defined here).

    NOTE(review): all four test methods share the name `lowerCAmelCase_`, so only
    the last definition survives on the class and the first three are never
    collected; restoring distinct `test_*` names needs a caller check. The
    original also bound locals to throwaway names and then read undefined
    identifiers, and a stray "| 31 |" artifact on the last line made the class
    a SyntaxError; both fixed here.
    '''

    lowercase_ = None

    def lowerCAmelCase_ ( self : Dict ):
        # to_json_string must round-trip every configured key/value.
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )

    def lowerCAmelCase_ ( self : Any ):
        # to_json_file / from_json_file round-trip.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def lowerCAmelCase_ ( self : Optional[Any] ):
        # save_pretrained / from_pretrained round-trip; saved JSON must be valid.
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def lowerCAmelCase_ ( self : Optional[int] ):
        # The extractor must be constructible with no arguments.
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
def UpperCamelCase__ ( n ):
    """Count the divisors of ``n`` via trial-division prime factorisation.

    The divisor count is the product of (multiplicity + 1) over all prime
    factors.

    NOTE(review): the original assigned `n_divisors`/`i`/`multiplicity` to
    throwaway names and then read them undefined (NameError); restored here.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # A leftover prime factor > sqrt(original n) has multiplicity 1.
        n_divisors *= 2
    return n_divisors


# The solution() loop below refers to this function as `count_divisors`.
count_divisors = UpperCamelCase__
def UpperCamelCase__ ( threshold=500 ):
    """Return the first triangular number with more than `threshold` divisors.

    `threshold` defaults to 500, preserving the original Project Euler 12
    behaviour of the zero-argument call.

    NOTE(review): the original called `count_divisors`, which is not defined in
    this module (the function above is named `UpperCamelCase__`); a private
    nested helper makes this function self-contained.
    """
    def _count_divisors(n ):
        # Divisor count = product of (multiplicity + 1) over prime factors.
        n_divisors = 1
        i = 2
        while i * i <= n:
            multiplicity = 0
            while n % i == 0:
                n //= i
                multiplicity += 1
            n_divisors *= multiplicity + 1
            i += 1
        if n > 1:
            n_divisors *= 2
        return n_divisors

    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if _count_divisors(t_num ) > threshold:
            break
    return t_num


# Restore the name the __main__ block below refers to.
solution = UpperCamelCase__
if __name__ == "__main__":
    # `solution` was undefined here (the function above is named
    # `UpperCamelCase__`); call the definition that actually exists.
    print(UpperCamelCase__())
def A__ ( grid: list ) -> int:
    """Return the minimum path sum from the top-left to the bottom-right of
    ``grid``, moving only right or down. The grid is modified in place.

    Raises TypeError for an empty or malformed grid.

    >>> A__([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7

    NOTE(review): the original bound `row_above`/`current_row` to throwaway
    names and called the undefined `fill_row` (the sibling function is also
    named `A__`); a private nested helper makes this function self-contained.
    """
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )

    def _fill_row(current_row: list , row_above: list ) -> list:
        # Accumulate the cheapest entry cost for each cell of one row, in place.
        current_row[0] += row_above[0]
        for cell_n in range(1 , len(current_row ) ):
            current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
        return current_row

    # First row: only reachable by moving right, so take the prefix sums.
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = _fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def A__ ( current_row: list , row_above: list ) -> list:
    """Accumulate the minimum entry cost for each cell of ``current_row`` given
    the already-accumulated ``row_above``; mutates and returns ``current_row``.

    NOTE(review): the original declared both parameters as
    `SCREAMING_SNAKE_CASE_` (a SyntaxError) while the body read
    `current_row`/`row_above`; restored here.
    """
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row


# The min-path function above was written to call this helper as `fill_row`.
fill_row = A__
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    # (A stray "| 32 |" dataset artifact fused onto the testmod() line made
    # this block a SyntaxError; removed.)
    import doctest

    doctest.testmod()
import numpy as np
class A_ :
    """A* search node: a grid position, a parent link and the g/h/f costs.

    NOTE(review): the original bound every attribute to a throwaway local in
    `__init__` (so instances had no state) and `__eq__` compared against the
    undefined name `cell` instead of its parameter; both fixed here.
    """

    def __init__( self ):
        self.position = (0, 0)   # (x, y) grid coordinates
        self.parent = None       # predecessor on the current best path
        self.g = 0               # cost from the start node
        self.h = 0               # heuristic estimate to the goal
        self.f = 0               # g + h

    def __eq__( self , a ):
        # Two cells denote the same node when they share a grid position.
        return self.position == a.position

    def _snake_case ( self ):
        """Print this cell's position (debug helper)."""
        print(self.position )


# The grid world and the script tail refer to this class as `Cell`.
Cell = A_
class A_ :
'''simple docstring'''
def __init__( self: str , a: List[str]=(5, 5) ):
__lowerCamelCase : Optional[Any] = np.zeros(a )
__lowerCamelCase : List[str] = world_size[0]
__lowerCamelCase : Optional[int] = world_size[1]
def _snake_case ( self: List[Any] ):
print(self.w )
def _snake_case ( self: Optional[int] , a: str ):
__lowerCamelCase : Tuple = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__lowerCamelCase : Optional[int] = cell.position[0]
__lowerCamelCase : List[str] = cell.position[1]
__lowerCamelCase : Dict = []
for n in neughbour_cord:
__lowerCamelCase : Dict = current_x + n[0]
__lowerCamelCase : Optional[Any] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__lowerCamelCase : Optional[Any] = Cell()
__lowerCamelCase : Any = (x, y)
__lowerCamelCase : Dict = cell
neighbours.append(a )
return neighbours
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : str = []
__lowerCamelCase : int = []
_open.append(SCREAMING_SNAKE_CASE__ )
while _open:
__lowerCamelCase : Union[str, Any] = np.argmin([n.f for n in _open] )
__lowerCamelCase : int = _open[min_f]
_closed.append(_open.pop(SCREAMING_SNAKE_CASE__ ) )
if current == goal:
break
for n in world.get_neigbours(SCREAMING_SNAKE_CASE__ ):
for c in _closed:
if c == n:
continue
__lowerCamelCase : Optional[int] = current.g + 1
__lowerCamelCase , __lowerCamelCase : int = n.position
__lowerCamelCase , __lowerCamelCase : Tuple = goal.position
__lowerCamelCase : Dict = (ya - ya) ** 2 + (xa - xa) ** 2
__lowerCamelCase : str = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = []
while current.parent is not None:
path.append(current.position )
__lowerCamelCase : int = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
lowercase_ = Gridworld()
# Start position and goal
lowercase_ = Cell()
lowercase_ = (0, 0)
lowercase_ = Cell()
lowercase_ = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
lowercase_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowercase_ = 1
print(world.w)
| 669 | 0 |
import math
class __magic_name__ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ):
snake_case__ = 0.0
snake_case__ = 0.0
for i in range(len(_a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ):
for i in range(len(_a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def SCREAMING_SNAKE_CASE ( ) -> None:
# Training Examples ( m, n )
snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case__ = SelfOrganizingMap()
snake_case__ = 3
snake_case__ = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
snake_case__ = training_samples[j]
# Compute the winning vector
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
snake_case__ = [0, 0, 0, 1]
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = year % 19
__lowerCamelCase : int = year % 4
__lowerCamelCase : Any = year % 7
__lowerCamelCase : Dict = math.floor(year / 100 )
__lowerCamelCase : str = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : Optional[int] = leap_day_inhibits / 4
__lowerCamelCase : str = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Optional[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : Optional[int] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : Tuple = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 18 )
else:
return datetime(SCREAMING_SNAKE_CASE__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowercase_ = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 9.80665
def __snake_case ( _lowercase ,_lowercase ,_lowercase = g ):
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 34 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__snake_case = 1
@register_to_config
def __init__( self: str , a: str=2000 , a: List[str]=0.1 , a: Any=20 , a: Dict=1e-3 ):
__lowerCamelCase : Dict = None
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[int] = None
def _snake_case ( self: int , a: str , a: Union[str, torch.device] = None ):
__lowerCamelCase : int = torch.linspace(1 , self.config.sampling_eps , a , device=a )
def _snake_case ( self: List[Any] , a: Union[str, Any] , a: Tuple , a: Optional[Any] , a: Dict=None ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowerCamelCase : Tuple = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowerCamelCase : Optional[int] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowerCamelCase : Optional[Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowerCamelCase : List[str] = std.unsqueeze(-1 )
__lowerCamelCase : Any = -score / std
# compute
__lowerCamelCase : List[Any] = -1.0 / len(self.timesteps )
__lowerCamelCase : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowerCamelCase : Dict = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowerCamelCase : int = beta_t.unsqueeze(-1 )
__lowerCamelCase : Any = -0.5 * beta_t * x
__lowerCamelCase : List[Any] = torch.sqrt(a )
__lowerCamelCase : Tuple = drift - diffusion**2 * score
__lowerCamelCase : str = x + drift * dt
# add noise
__lowerCamelCase : Any = randn_tensor(x.shape , layout=x.layout , generator=a , device=x.device , dtype=x.dtype )
__lowerCamelCase : Any = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self: Optional[int] ):
return self.config.num_train_timesteps
| 669 | 0 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> tuple[float, list[float]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(range(len(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Any = [v / w for v, w in zip(A__ , A__ )]
index.sort(key=lambda A__ : ratio[i] , reverse=A__ )
SCREAMING_SNAKE_CASE__ : float = 0
SCREAMING_SNAKE_CASE__ : list[float] = [0] * len(A__ )
for i in index:
if weight[i] <= capacity:
SCREAMING_SNAKE_CASE__ : List[Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : str = int(SCREAMING_SNAKE_CASE__ )
if n_element < 1:
__lowerCamelCase : str = ValueError('a should be a positive number' )
raise my_error
__lowerCamelCase : Tuple = [1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = (0, 0, 0)
__lowerCamelCase : Any = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowercase_ = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
lowercase_ = hamming(int(n))
print('-----------------------------------------------------')
print(F"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
| 669 | 0 |
from __future__ import annotations
__lowercase : List[Any] = [True] * 1_000_001
__lowercase : Union[str, Any] = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
__lowercase : List[Any] = False
i += 1
def lowercase ( __A : int ) -> bool:
'''simple docstring'''
return seive[n]
def lowercase ( __A : int ) -> bool:
'''simple docstring'''
return any(digit in """02468""" for digit in str(__A ) )
def lowercase ( __A : int = 100_0000 ) -> list[int]:
'''simple docstring'''
snake_case : Optional[int] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__A ) and not contains_an_even_digit(__A ):
snake_case : Any = str(__A )
snake_case : Tuple = [int(str_num[j:] + str_num[:j] ) for j in range(len(__A ) )]
if all(is_prime(__A ) for i in list_nums ):
result.append(__A )
return result
def lowercase ( ) -> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
| 36 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: List[Any] ):
__lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
__lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
__lowerCamelCase : Tuple = 100
self.assertEqual(kp.calc_profit(a , a , a ) , 210 )
def _snake_case ( self: str ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'Weight can not be negative.' )
def _snake_case ( self: Dict ):
self.assertRaisesRegex(a , 'Profit can not be negative.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: Any ):
self.assertRaisesRegex(
a , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 669 | 0 |
UpperCamelCase : str = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase : Tuple = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase : Optional[int] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 37 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
__lowerCamelCase : List[str] = parent
__lowerCamelCase : Optional[Any] = batch_size
__lowerCamelCase : Optional[int] = num_channels
__lowerCamelCase : str = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : List[str] = is_training
__lowerCamelCase : Dict = use_input_mask
__lowerCamelCase : Any = use_token_type_ids
__lowerCamelCase : List[str] = use_labels
__lowerCamelCase : str = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : List[Any] = intermediate_size
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Any = hidden_dropout_prob
__lowerCamelCase : Optional[int] = attention_probs_dropout_prob
__lowerCamelCase : Dict = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : int = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : List[str] = coordinate_size
__lowerCamelCase : int = shape_size
__lowerCamelCase : Union[str, Any] = num_labels
__lowerCamelCase : int = num_choices
__lowerCamelCase : int = scope
__lowerCamelCase : Any = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCamelCase : Any = text_seq_length
__lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
__lowerCamelCase : Any = self.text_seq_length + self.image_seq_length
def _snake_case ( self: List[str] ):
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCamelCase : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCamelCase : List[str] = bbox[i, j, 3]
__lowerCamelCase : str = bbox[i, j, 1]
__lowerCamelCase : Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCamelCase : Tuple = bbox[i, j, 2]
__lowerCamelCase : Any = bbox[i, j, 0]
__lowerCamelCase : List[str] = tmp_coordinate
__lowerCamelCase : str = tf.constant(a )
__lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Any = None
if self.use_input_mask:
__lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCamelCase : Tuple = None
if self.use_token_type_ids:
__lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCamelCase : Dict = None
__lowerCamelCase : Union[str, Any] = None
if self.use_labels:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCamelCase : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
__lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
# text + image
__lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
__lowerCamelCase : int = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
__lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCamelCase : List[Any] = model(a , training=a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
__lowerCamelCase : int = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
__lowerCamelCase : Union[str, Any] = self.num_labels
__lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
__lowerCamelCase : Optional[Any] = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
__lowerCamelCase : List[Any] = 2
__lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
__lowerCamelCase : Any = model(
a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self: List[Any] ):
__lowerCamelCase : str = self.prepare_config_and_inputs()
((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
__lowerCamelCase : Tuple = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
return True
def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
__lowerCamelCase : List[str] = copy.deepcopy(a )
if model_class in get_values(a ):
__lowerCamelCase : Tuple = {
k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(a , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a ):
__lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(a ):
__lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(a ):
__lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(a ):
__lowerCamelCase : Dict = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def _snake_case ( self: Tuple ):
__lowerCamelCase : int = TFLayoutLMvaModelTester(self )
__lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _snake_case ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : int = model_class(a )
if getattr(a , 'hf_compute_loss' , a ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
__lowerCamelCase : int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
]
__lowerCamelCase : Dict = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
__lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
__lowerCamelCase : str = model(a , **a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
__lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowerCamelCase : int = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCamelCase : Tuple = -100
__lowerCamelCase : Tuple = tf.convert_to_tensor(a )
__lowerCamelCase : Tuple = model(a , **a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
__lowerCamelCase : str = model(a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
# Get keys that were added with the _prepare_for_class function
__lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
__lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
__lowerCamelCase : List[str] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCamelCase : Optional[int] = {0: 'input_ids'}
for label_key in label_keys:
__lowerCamelCase : Dict = signature_names.index(a )
__lowerCamelCase : str = label_key
__lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCamelCase : Optional[int] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCamelCase : Optional[int] = prepared_for_class[value]
__lowerCamelCase : Any = tuple(a )
# Send to model
__lowerCamelCase : int = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _snake_case ( self: List[str] ):
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a , a , a , a )
def _snake_case ( self: int ):
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase : Union[str, Any] = type
self.model_tester.create_and_check_model(a , a , a , a , a , a )
def _snake_case ( self: Dict ):
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
a , a , a , a , a , a , a )
def _snake_case ( self: str ):
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
a , a , a , a , a , a , a )
def _snake_case ( self: str ):
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
a , a , a , a , a , a , a )
@slow
def _snake_case ( self: int ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
__lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self: Optional[int] ):
return LayoutLMvaImageProcessor(apply_ocr=a ) if is_vision_available() else None
@slow
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Tuple = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : List[Any] = prepare_img()
__lowerCamelCase : str = image_processor(images=a , return_tensors='tf' ).pixel_values
__lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] )
__lowerCamelCase : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCamelCase : int = model(input_ids=a , bbox=a , pixel_values=a , training=a )
# verify the logits
__lowerCamelCase : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , a )
__lowerCamelCase : Any = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4 ) )
| 669 | 0 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("""only integers accepted as input""" )
else:
snake_case__ : str = str(abs(__magic_name__ ) )
snake_case__ : Dict = [list(__magic_name__ ) for char in range(len(__magic_name__ ) )]
for index in range(len(__magic_name__ ) ):
num_transpositions[index].pop(__magic_name__ )
return max(
int("""""".join(list(__magic_name__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 38 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = CLIPTokenizer
__snake_case = CLIPTokenizerFast
__snake_case = True
__snake_case = {}
__snake_case = False
def _snake_case ( self: Union[str, Any] ):
super().setUp()
# fmt: off
__lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
__lowerCamelCase : Tuple = {'unk_token': '<unk>'}
__lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a ) )
def _snake_case ( self: Tuple , **a: Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Union[str, Any] , **a: List[str] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: Optional[int] , a: List[Any] ):
__lowerCamelCase : Tuple = 'lower newer'
__lowerCamelCase : Tuple = 'lower newer'
return input_text, output_text
def _snake_case ( self: List[str] ):
__lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase : Optional[Any] = 'lower newer'
__lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
__lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__lowerCamelCase : int = tokens + [tokenizer.unk_token]
__lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@require_ftfy
def _snake_case ( self: Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
__lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
__lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
__lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
__lowerCamelCase : Any = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of space type
__lowerCamelCase : List[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
__lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of line break type
__lowerCamelCase : str = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__lowerCamelCase : Dict = tokenizer_s.tokenize(a )
__lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
def _snake_case ( self: List[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
__lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
__lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
__lowerCamelCase : List[Any] = F' {text}'
__lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
__lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
def _snake_case ( self: str ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(a ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
    @require_ftfy
    def _snake_case ( self: Tuple ):
        # Re-run the shared python/rust equivalence test, this time with ftfy available.
        super().test_tokenization_python_rust_equals()
    def _snake_case ( self: Tuple ):
        # CLIP always lower cases letters
        # Intentionally a no-op: the mixin's casing test does not apply to CLIP.
        pass
| 669 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCAmelCase_ = '''scheduler_config.json'''
class snake_case_ ( Enum ):
    """Enumeration of the Karras-style diffusion scheduler classes.

    Bug fix: the base class was the undefined name `__A` (Enum is already
    imported at the top of the file), and every member was bound to the same
    name, which `Enum` rejects with "Attempted to reuse key".  The members are
    restored to one distinct scheduler name per value.
    """

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class snake_case_ ( BaseOutput ):
    """Output of a scheduler `step()` call.

    Bug fix: the base class was the undefined name `__A`; `BaseOutput` is the
    class imported from `..utils` above.  The single field is named
    `prev_sample`, matching the `.prev_sample` accesses made by callers.
    """

    # Computed sample (x_{t-1}) of the previous timestep; fed back to the model.
    prev_sample: torch.FloatTensor
class snake_case_ :
    """Base mixin for schedulers: config (de)serialization and compatibility listing.

    Bug fixes relative to the scrambled original:
    * `from_pretrained`/`save_pretrained` declared several parameters with the
      same name (`_UpperCamelCase`), which is a SyntaxError — distinct names
      are restored and forwarded explicitly;
    * the class attributes were all bound to one name; they are restored so
      that `cls._compatibles` (used below) actually exists;
    * the compatibility helper is named `_get_compatibles`, matching the call
      in the `compatibles` property.
    """

    config_name = SCHEDULER_CONFIG_NAME  # filename used by load_config/save_config
    _compatibles = []                    # names of interchangeable scheduler classes
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler from a saved `scheduler_config.json`."""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Save the scheduler configuration to *save_directory*."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Return all scheduler classes declared compatible with this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package so only
        # classes that are actually exported are returned.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab; the menu class below
# reads `in_colab` to decide between arrow-key navigation and numeric input.
# Bug fix: the flag was bound to `lowercase_` while the class referenced the
# undefined name `in_colab`.
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
lowercase_ = in_colab  # preserve the scrambled module-level alias
@input.register
class A_ :
    """Interactive terminal bullet menu.

    Arrow keys / digit keys move the selection, enter confirms, Ctrl-C aborts.

    Bug fixes relative to the scrambled original:
    * several methods declared two parameters with the same name (`a`), which
      is a SyntaxError — proper names are restored;
    * instance state was assigned to throwaway locals; it is now stored on
      ``self`` (position, choices, prompt, arrow_char), matching the reads
      performed elsewhere in the class;
    * the digit-key decorator indexed ``KEYMAP[str(a)]`` with the undefined
      name ``a`` instead of the comprehension variable ``number``;
    * helper methods are named ``write_choice`` / ``print_choice`` /
      ``move_direction`` to match the existing ``self.*`` call sites;
    * the mutable default argument ``choices: list = []`` is replaced by the
      ``None`` sentinel idiom (backward compatible).
    """

    def __init__( self, prompt: str = None, choices: list = None ):
        self.position = 0
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice( self, index: int, end: str = "" ):
        """Write choice *index*, colored green on ANSI-capable terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice( self, index: int ):
        """Print one choice row, prefixed with the arrow when it is selected."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ')
            self.write_choice(index)
        else:
            forceWrite(F' {self.choices[index]}')
        reset_cursor()

    def move_direction( self, direction: Direction, num_spaces: int = 1 ):
        """Move the selection by *num_spaces* rows, redrawing both affected rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def _snake_case ( self ):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def _snake_case ( self ):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def _snake_case ( self ):
        # Confirm selection: jump below the menu and report the chosen index.
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def _snake_case ( self ):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def _snake_case ( self ):
        """Jump straight to a row when a digit key is pressed."""
        # NOTE(review): `self.current_selection` appears to be set by the
        # @input.register framework — confirm against the input helpers.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def _snake_case ( self, default_choice: int = 0 ):
        """Render the menu and block until the user picks a choice; return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu (choices plus one trailing line)
                    # before echoing the final selection.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 669 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments controlling which GLUE task/data the model is trained and evaluated on.

    Bug fixes relative to the scrambled original:
    * field names are restored (`task_name`, `data_dir`, `max_seq_length`,
      `overwrite_cache`) to match the `args.*` reads in the dataset class;
    * `overwrite_cache` defaulted to the undefined name `a__`; it defaults to False;
    * the task-name normalization runs in `__post_init__` and assigns back to
      `self.task_name` instead of a throwaway local.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )

    def __post_init__(self):
        # Normalize so lookups into glue_processors / glue_output_modes succeed.
        self.task_name = self.task_name.lower()


# Preserve the scrambled module-level alias for any external references.
lowerCAmelCase_ = GlueDataTrainingArguments
class Split(Enum):
    """Dataset split selector.

    Bug fixes relative to the scrambled original: the base class was the
    undefined name `a__` (Enum is imported at the top of the file), and all
    three members shared one name, which Enum rejects.  Member names match the
    `Split.train` / `Split.dev` / `Split.test` references in the dataset class.
    """

    train = "train"
    dev = "dev"
    test = "test"


# Preserve the scrambled module-level alias for any external references.
lowerCAmelCase_ = Split
class GlueDataset(Dataset):
    """Torch dataset of GLUE `InputFeatures`, cached on disk per (mode, tokenizer, length, task).

    Bug fixes relative to the scrambled original: the base class was the
    undefined name `a__` (Dataset is imported above); `__init__` declared five
    parameters with one shared name (a SyntaxError); and instance state was
    assigned to throwaway locals instead of ``self``.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py',
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start)
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        """Return the label list for the configured task."""
        return self.label_list


# Preserve the scrambled module-level alias for any external references.
lowerCAmelCase_ = GlueDataset
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
    """Tests for CMStochasticIterativeScheduler.

    Bug fixes relative to the scrambled original: the two class attributes
    shared one name (only the second survived) while the tests read
    `self.scheduler_classes`; the config helper is named
    `get_scheduler_config` to match its call sites; and every method body
    referenced undefined names (`config`, `a`, throwaway locals) which are
    restored to real locals.
    """

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config ( self: Any , **a: Dict ):
        """Default scheduler config; keyword args override individual fields."""
        config = {
            'num_train_timesteps': 201,
            'sigma_min': 0.0_0_2,
            'sigma_max': 8_0.0,
        }
        config.update(**a)
        return config

    def _snake_case ( self: List[Any] ):
        """One step at two different timesteps preserves the sample shape."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def _snake_case ( self: Optional[Any] ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def _snake_case ( self: List[str] ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def _snake_case ( self: Tuple ):
        """Full single-step denoising loop reproduces known reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_9_2.7_6_1_4) < 1e-2
        assert abs(result_mean.item() - 0.2_5_1_0) < 1e-3

    def _snake_case ( self: Optional[Any] ):
        """Full multi-step (custom timesteps) loop reproduces reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 3_4_7.6_3_5_7) < 1e-2
        assert abs(result_mean.item() - 0.4_5_2_7) < 1e-3

    def _snake_case ( self: Tuple ):
        """Non-descending custom timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg='`timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def _snake_case ( self: int ):
        """Passing both num_inference_steps and timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def _snake_case ( self: Optional[Any] ):
        """Timesteps at/after num_train_timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 669 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 41 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: the package is `bs4`, not `bsa`

if __name__ == "__main__":
    # Download the Open Graph preview image (og:image) of a web page and save
    # it with a timestamped filename.  The scrambled original bound every
    # intermediate result to `lowercase_` while reading the real names
    # (url, image_url, image_data, file_name); those names are restored.
    url = input('Enter image url: ').strip()
    print(F"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(F"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCAmelCase ( unittest.TestCase ):
    """Unit tests for the greedy knapsack implementation.

    Bug fixes relative to the scrambled original: every method shared one name
    (so unittest discovered none of them) and the expected exception type was
    the undefined name `SCREAMING_SNAKE_CASE_` — it is `ValueError`.
    """

    def test_sorted(self) -> str:
        """calc_profit greedily packs by profit/weight ratio up to max_weight."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> Dict:
        # NOTE(review): assertRaisesRegex is used here without a callable, so it
        # only builds a context manager — the production code is never invoked.
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self) -> str:
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self) -> List[Any]:
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self) -> Optional[Any]:
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self) -> List[str]:
        self.assertRaisesRegex(
            ValueError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
    # Run the greedy-knapsack test suite when executed directly.
    unittest.main()
| 42 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Validate repository file naming conventions: no uppercase characters,
# spaces or hyphens, and every file must live inside a directory.
# Bug fix: the scrambled original bound every list to `lowercase_` while the
# `if` checks read the real names; the names are restored.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(F"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(F"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

# Exit with the number of offending files so CI fails on any violation.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 669 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename ``{block}.{layer}.{original_name}`` to ``block.{block-offset}.{layer}.{new_name}`` inside *key*.

    Bug fix: the scrambled original declared all four parameters with the same
    name (a SyntaxError) and was defined as ``_a`` while call sites use
    ``replace_key_with_offset``.

    Args:
        key: full state-dict key, e.g. ``poolformer.encoder.2.3.mlp.fc1.weight``.
        offset: number of patch-embedding layers to subtract from the block index.
        original_name: dotted suffix to replace (its first segment anchors the block/layer indices).
        new_name: replacement dotted suffix.
    """
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    # The two path segments immediately before `to_find` are block and layer indices.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key


# Preserve the scrambled alias for any external references.
_a = replace_key_with_offset
def rename_keys(state_dict):
    """Map original PoolFormer checkpoint keys to the transformers naming scheme.

    Bug fix: the scrambled original was defined as ``_a`` (the converter calls
    ``rename_keys``), bound intermediates to throwaway locals, and passed the
    whole state dict where the current key was expected.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict


# Preserve the scrambled alias for any external references.
_a = rename_keys
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Bug fix: the scrambled original was defined as ``_a`` (the converter calls
    ``prepare_img``) and passed the undefined name ``SCREAMING_SNAKE_CASE``
    to ``requests.get``; the URL local and ``stream=True`` are restored.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image


# Preserve the scrambled alias for any external references.
_a = prepare_img
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the transformers format and save it.

    Bug fixes relative to the scrambled original: the three parameters shared
    one name (a SyntaxError), the function was defined as ``_a`` while the CLI
    calls ``convert_poolformer_checkpoint``, and every config attribute was
    assigned to a throwaway local instead of the ``config`` object.

    Args:
        model_name: e.g. ``poolformer_s12`` — the last three characters select the size.
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for model + image processor.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 10_00
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 10_00)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423])
    else:
        raise ValueError(f'Size {size} not supported')

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1E-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Preserve the scrambled alias for any external references.
_a = convert_poolformer_checkpoint
if __name__ == "__main__":
    # CLI entry point: convert a PoolFormer checkpoint to the transformers format.
    # Bug fix: the parser and parsed args were bound to `lowerCAmelCase` while
    # the code reads `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): both bindings below share the scrambled name `lowercase_`,
# so the archive map shadows the logger — confirm the intended names
# (originally `logger` and a *_PRETRAINED_CONFIG_ARCHIVE_MAP constant).
lowercase_ = logging.get_logger(__name__)
# Map of published XLM-RoBERTa checkpoints to their hosted config files.
lowercase_ = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( __UpperCamelCase ):
    """XLM-RoBERTa model configuration.

    Bug fixes relative to the scrambled original: every ``__init__`` parameter
    was named ``a`` (a SyntaxError), and the body assigned each value to a
    throwaway local while referencing the real parameter names; the parameter
    names referenced by the body are restored and stored on ``self``.
    """

    __snake_case = """xlm-roberta"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( __UpperCamelCase ):
    """ONNX export configuration for XLM-RoBERTa models."""

    @property
    def _snake_case ( self: Optional[Any] ):
        """Dynamic-axes spec for the exported inputs (input_ids / attention_mask).

        Bug fix: the axis mapping was assigned to a throwaway local while the
        return statement read the undefined name `dynamic_axis`.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 0 |
'''simple docstring'''
from math import sqrt
def A_ ( number: int ) -> bool:
    """Return True when ``number`` is prime, via trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def A_ ( n: int ) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to ``n`` (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def A_ ( n: int ) -> list:
    """Return a list of all primes between 2 and ``n`` (inclusive), tested one by one.

    NOTE(review): relies on an ``is_prime`` helper that must be defined
    elsewhere in this module (the sibling definitions here were all renamed
    ``A_`` by the obfuscation) — confirm it is in scope.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def A_ ( number: int ) -> list:
    """Return the prime factorization of ``number`` as a list of factors.

    NOTE(review): requires an ``is_prime`` helper in module scope. Repeated
    division uses ``/=`` (true division), so factors of composite inputs end
    up compared against a float quotient — kept as in the original.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def A_ ( number: int ) -> int:
    """Return the greatest prime factor of ``number``.

    NOTE(review): requires a ``prime_factorization`` helper in module scope.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def A_ ( number: int ) -> int:
    """Return the smallest prime factor of ``number``.

    NOTE(review): requires a ``prime_factorization`` helper in module scope.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def A_ ( number: int ) -> bool:
    """Return True when ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def A_ ( number: int ) -> bool:
    """Return True when ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def A_ ( number: int ) -> list:
    """Goldbach: return two primes whose sum equals the even ``number`` > 2.

    NOTE(review): requires ``is_even``, ``get_prime_numbers`` and ``is_prime``
    helpers in module scope.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def A_ ( number1: int, number2: int ) -> int:
    """Return the greatest common divisor of two non-negative ints (Euclid's algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    # Euclidean algorithm: replace (a, b) with (b, a mod b) until b == 0.
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def A_ ( number1: int, number2: int ) -> int:
    """Return the least common multiple (kgV) of two positive ints.

    NOTE(review): requires a ``prime_factorization`` helper in module scope.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1: shared factors contribute their max multiplicity
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2: factors unique to number2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def A_ ( n: int ) -> int:
    """Return the n-th prime number (0-indexed: A_(0) == 2).

    NOTE(review): requires an ``is_prime`` helper in module scope.
    """
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def A_ ( p_number_1: int, p_number_2: int ) -> list:
    """Return all primes strictly between two primes ``p_number_1 < p_number_2``.

    NOTE(review): requires an ``is_prime`` helper in module scope.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def A_ ( n: int ) -> list:
    """Return all positive divisors of ``n`` (including 1 and ``n``) in ascending order."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def A_ ( number: int ) -> bool:
    """Return True when ``number`` equals the sum of its proper divisors (perfect number).

    NOTE(review): requires a ``get_divisors`` helper in module scope.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def A_ ( numerator: int, denominator: int ) -> tuple:
    """Return the fraction ``numerator/denominator`` reduced to lowest terms.

    NOTE(review): requires a ``gcd`` helper in module scope.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def A_ ( n: int ) -> int:
    """Return n! (iteratively); 0! == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def A_ ( n: int ) -> int:
    """Return the n-th Fibonacci number of the sequence 1, 1, 2, 3, 5, ... (A_(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Fast (dummy-checkpoint) tests for ConsistencyModelPipeline.

    NOTE(review): this block appears machine-mangled and is not runnable as
    written — several methods declare two parameters both named ``a`` (a
    SyntaxError), and results are assigned to annotated locals named
    ``__lowerCamelCase`` but read back under other names (``unet``,
    ``components``, ``inputs``, ``pipe``, ``image`` ...), which would raise
    NameError.  Kept byte-identical pending restoration of the original
    identifiers.
    """
    # NOTE(review): four class attributes all named ``__snake_case`` — later
    # assignments shadow the earlier ones; presumably pipeline_class / params /
    # batch_params / required_optional_params originally — TODO confirm.
    __snake_case = ConsistencyModelPipeline
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    @property
    def _snake_case ( self: str ):
        # Tiny unconditional UNet test checkpoint fetched from the Hub.
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def _snake_case ( self: Tuple ):
        # Tiny class-conditional UNet variant of the same test checkpoint.
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def _snake_case ( self: int , a: str=False ):
        # Builds the {'unet', 'scheduler'} component dict for the pipeline.
        if class_cond:
            __lowerCamelCase : str = self.dummy_cond_unet
        else:
            __lowerCamelCase : str = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def _snake_case ( self: int , a: List[str] , a: Any=0 ):
        # Standard call kwargs: batch of 1, explicit two-step timestep schedule.
        if str(a ).startswith('mps' ):
            # MPS backend does not support device-local generators.
            __lowerCamelCase : List[Any] = torch.manual_seed(a )
        else:
            __lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _snake_case ( self: Optional[Any] ):
        # Multistep sampling, unconditional: checks shape and a 3x3 pixel slice.
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components()
        __lowerCamelCase : str = ConsistencyModelPipeline(**a )
        __lowerCamelCase : str = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Any = self.get_dummy_inputs(a )
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # Multistep sampling, class-conditional (class label 0).
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
        __lowerCamelCase : Tuple = 0
        __lowerCamelCase : List[str] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # One-step sampling (num_inference_steps=1, timesteps=None), unconditional.
        __lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[int] = self.get_dummy_components()
        __lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Tuple = self.get_dummy_inputs(a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Any = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: List[str] ):
        # One-step sampling, class-conditional.
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU tests for ConsistencyModelPipeline against the released
    ImageNet-64 consistency-distillation checkpoint.

    NOTE(review): same mangling as the fast-test class above — duplicate
    parameters named ``a`` (a SyntaxError) and locals assigned to
    ``__lowerCamelCase`` but read back under their original names — so the
    block is not runnable as written; kept byte-identical.
    """
    def _snake_case ( self: Any ):
        # Free CUDA memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # Standard call kwargs; optionally injects fixed pre-generated latents
        # so outputs are comparable across attention backends.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs
    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # Deterministic latents of the given shape/dtype on the given device.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents
    def _snake_case ( self: str ):
        # Multistep sampling against diffusers_cd_imagenet64_l2.
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _snake_case ( self: Optional[int] ):
        # One-step sampling against the released checkpoint.
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case ( self: List[str] ):
        # Multistep sampling in fp16 with torch 2.0 SDPA flash attention forced on.
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case ( self: Dict ):
        # One-step sampling in fp16 with torch 2.0 SDPA flash attention forced on.
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCAmelCase_ ( TaskTemplate ):
    """Task template describing an image-classification dataset layout.

    NOTE(review): restored from a mangled source in which all five dataclass
    fields were named ``_snake_case`` (shadowing one another), both methods
    were named ``__a``, and ``frozen=True`` / the ``TaskTemplate`` base /
    ``ClassLabel`` type check were replaced by unrelated identifiers.  Field
    and method names were reconstructed from the method bodies
    (``self.label_column``, ``self.label_schema``, ``self.image_column``) and
    the ``datasets`` TaskTemplate API — confirm against upstream.
    """

    # Task identifier; serialized even when left at its default.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    # Expected input/output feature schemas.
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    # Column names this template maps onto the schemas above.
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        actual ClassLabel feature; raises ValueError if the label column is
        missing or not a ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: bypass __setattr__ via the instance __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self):
        """Map dataset column names to the canonical 'image'/'labels' names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase_ = logging.get_logger(__name__)
# Map from pretrained checkpoint name to its hosted config.json URL.
# NOTE(review): this rebinds ``lowercase_`` and thus shadows the logger bound
# just above — the two presumably had distinct names (``logger`` and a
# ``*_PRETRAINED_CONFIG_ARCHIVE_MAP``) originally; TODO confirm.
lowercase_ = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __UpperCamelCase ):
    """Configuration class for the TrOCR text-recognition decoder.

    NOTE(review): restored from a mangled source in which every ``__init__``
    parameter was named ``a`` (duplicate parameters — a SyntaxError), the
    three class attributes were all named ``__snake_case``, and every
    ``self.<attr> = ...`` assignment was turned into a throwaway local.
    Parameter names/defaults were reconstructed from the assignment
    right-hand sides and the attribute map below; confirm against the
    upstream TrOCRConfig.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # Maps generic config attribute names onto TrOCR-specific ones.
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Store decoder architecture hyper-parameters on the config.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 0 |
"""simple docstring"""
def lowerCamelCase_( input_str, use_pascal=False ) -> str:
    """Convert a snake_case string to camelCase (or PascalCase when ``use_pascal``).

    Raises ValueError when ``input_str`` is not a str or ``use_pascal`` is not
    a bool.  The original mangled code raised/used undefined names; locals
    restored from their usage sites.
    """
    if not isinstance(input_str, str):
        msg = f"""Expected string as input, found {type(input_str)}"""
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"""Expected boolean as use_pascal parameter, found {type(use_pascal)}"""
        raise ValueError(msg)

    words = input_str.split("_")

    # PascalCase capitalizes every word; camelCase keeps the first word as-is.
    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.  (A dataset-table
    # artifact fused onto the original ``testmod()`` line made it a
    # SyntaxError; removed.)
    from doctest import testmod

    testmod()
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
    """Processor bundling a CLIP image processor with an XLM-RoBERTa tokenizer.

    NOTE(review): restored from a mangled source with duplicate ``a``
    parameters (a SyntaxError), three class attributes all named
    ``__snake_case``, and locals assigned to ``__lowerCamelCase`` but read
    back under their original names.  Attribute and method names follow the
    ProcessorMixin contract (``attributes``, ``image_processor_class``,
    ``tokenizer_class``, ``batch_decode``, ``decode``,
    ``model_input_names``) — confirm against upstream.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as a fallback for
        # `image_processor`, with a deprecation warning.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``; at least one is required."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge the pixel values into the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def UpperCAmelCase__ ( scheduler, num_steps=10 ):
    """Step ``scheduler`` ``num_steps`` times, recording the lr before each step.

    Returns the list of ``scheduler.get_lr()[0]`` values.  (The original
    mangled signature declared two parameters both named ``lowerCamelCase_``
    — a SyntaxError; names restored from the body's usage.)
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def UpperCAmelCase__ ( scheduler, num_steps=10 ):
    """Like ``unwrap_schedule`` but save/reload the scheduler state halfway.

    Records ``scheduler.get_lr()[0]`` before each of ``num_steps`` steps; at
    the midpoint the state dict is round-tripped through ``torch.save`` /
    ``torch.load`` in a temporary directory to exercise serialization.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class _UpperCamelCase( unittest.TestCase ):
    """Optimizer tests: AdamW / Adafactor fit a 3-element weight toward a target.

    NOTE(review): mangled block, not runnable as written — the first method
    declares three parameters all named ``SCREAMING_SNAKE_CASE__`` (a
    SyntaxError), and the training loops assign results to ``__a`` locals but
    read back ``loss`` / ``criterion`` / ``optimizer`` / ``w``; kept
    byte-identical pending restoration of the original identifiers.
    """
    def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
        '''Assert two float lists are elementwise almost-equal within a tolerance.'''
        self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
        for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : Tuple ):
        '''AdamW: 100 MSE descent steps should drive w to the target values.'''
        __a : Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ )
        __a : List[str] = torch.tensor([0.4, 0.2, -0.5] )
        __a : List[Any] = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        __a : List[Any] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(1_0_0 ):
            __a : int = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def __lowerCAmelCase ( self : List[str] ):
        '''Adafactor: 1000 MSE descent steps should drive w to the target values.'''
        __a : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ )
        __a : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
        __a : Union[str, Any] = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        __a : int = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=SCREAMING_SNAKE_CASE__ , weight_decay=0.0 , relative_step=SCREAMING_SNAKE_CASE__ , scale_parameter=SCREAMING_SNAKE_CASE__ , warmup_init=SCREAMING_SNAKE_CASE__ , )
        for _ in range(1_0_0_0 ):
            __a : str = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _UpperCamelCase( unittest.TestCase ):
    """Learning-rate-scheduler tests: expected lr trajectories plus save/reload.

    NOTE(review): mangled block, not runnable as written — the three class
    attributes are all named ``__SCREAMING_SNAKE_CASE`` (later assignments
    shadow earlier ones; presumably ``m`` / ``optimizer`` / ``num_steps``
    originally), the AdamW line reads an undefined ``m``, the helper method
    declares duplicate parameters (a SyntaxError), and ``__a`` locals are read
    back under their original names; kept byte-identical.
    """
    __SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
    __SCREAMING_SNAKE_CASE : str = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    __SCREAMING_SNAKE_CASE : str = 10
    def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict=None ):
        '''Assert two float lists are elementwise almost-equal within a tolerance.'''
        self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
        for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ , msg=SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : Any ):
        '''Each scheduler factory should produce its expected lr sequence and survive save/reload.'''
        __a : str = {'num_warmup_steps': 2, 'num_training_steps': 1_0}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        __a : Any = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {'num_warmup_steps': 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, 'num_cycles': 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {'num_warmup_steps': 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            __a , __a : str = data
            __a : Tuple = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            __a : List[Any] = unwrap_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps )
            self.assertListAlmostEqual(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            __a : Dict = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(SCREAMING_SNAKE_CASE__ ) # wrap to test picklability of the schedule
            __a : Optional[int] = unwrap_and_save_reload_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps )
            self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , msg=f'''failed for {scheduler_func} in save and reload''' )
class _UpperCamelCase:
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : str = fn
def __call__( self : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
return self.fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : List[Any] = list(map(self , scheduler.lr_lambdas ) )
| 47 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast (tiny-model) tests for the unconditional LDM pipeline.

    NOTE(review): the scrambled source used the placeholder `a` for every
    argument and gave all members the same mangled name; restored to the
    conventional diffusers LDM test layout — confirm against upstream.
    """

    @property
    def dummy_uncond_unet(self):
        """Tiny deterministic UNet."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    @property
    def dummy_vq_model(self):
        """Tiny deterministic VQ autoencoder."""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """Tiny deterministic CLIP text encoder."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images
        # same seed again so the tuple-return path must reproduce the same image
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test for the pretrained CelebA-HQ LDM pipeline.

    NOTE(review): placeholder argument names restored from the conventional
    diffusers integration test — confirm against upstream.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 669 | 0 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Normalize an absolute pixel box to the 0-1000 coordinate grid used by LayoutLM.

    The original definition had three parameters all bearing the same name
    (a SyntaxError) while the body used `box`/`width`/`height`, and it was
    named `A` although the OCR helper below calls `normalize_box`.
    """
    return [
        int(10_00 * (box[0] / width)),
        int(10_00 * (box[1] / height)),
        int(10_00 * (box[2] / width)),
        int(10_00 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Run Tesseract OCR on a document image.

    Returns a pair ``(words, normalized_boxes)`` where boxes are in LayoutLM's
    0-1000 coordinate system. The original definition had three identically
    named parameters (a SyntaxError) and dropped several assignment targets;
    restored per the body's own references and the `apply_tesseract` call site.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class A(BaseImageProcessor):
    r"""
    LayoutLM-style image processor: optionally resizes document images, flips
    RGB to BGR, and (optionally) runs Tesseract OCR to extract words and
    normalized bounding boxes.

    NOTE(review): base class restored to the imported `BaseImageProcessor`
    (the scrambled source had an unresolved placeholder), `self.` targets
    restored in `__init__`, and method names restored so `self.resize` and
    the output keys resolve.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` (a ``{"height": h, "width": w}`` dict)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level image_transforms helper, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of document images; returns a `BatchFeature` with
        ``pixel_values`` plus ``words``/``boxes`` when OCR is enabled."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 48 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Usage string raised when the script is invoked with the wrong argument count.
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'

# ~9% alive / ~91% dead seed distribution (shuffled in place).
# The scrambled source assigned both names to `lowercase_`, leaving
# `usage_doc` and `choice` undefined.
choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """Return a ``size`` x ``size`` grid with every cell dead (False).

    Restored name: the `__main__` block calls `create_canvas`, but the
    scrambled definition was renamed.
    """
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    """Randomize `canvas` in place: each cell becomes alive/dead with equal odds.

    Restored name (`__main__` calls `seed`) and the dropped assignment target
    `canvas[i][j]` — the scrambled version computed the random bit and threw
    it away.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance a (square) Game of Life board by one generation.

    Restored names: `__main__` calls `run`, and the cell-update helper is
    referenced as `__judge_point` but its scrambled definition was renamed;
    the per-cell assignment target had also been dropped.
    """
    current_canvas = np.array(canvas)
    # fresh all-dead board of the same size to receive the next generation
    next_gen_canvas = np.array([[False] * current_canvas.shape[0] for _ in range(current_canvas.shape[0])])
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # NOTE: at the top/left edges `r-1`/`c-1` is -1, so the numpy slice
            # silently truncates the neighbourhood rather than wrapping —
            # behaviour preserved from the original.
            next_gen_canvas[r][c] = __judge_point(pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to one cell given its (at most 3x3) neighbourhood."""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    # NOTE(review): all of these targets were collapsed onto one name in the
    # scrambled source, so `fig`/`ax`/`cmap`/`c` never existed; restored.
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        # animate generations until interrupted
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_lowercase : Any = logging.get_logger(__name__)
class _UpperCAmelCase(PoolFormerImageProcessor):
    """Deprecated alias of `PoolFormerImageProcessor`.

    Emits a FutureWarning and defers everything to the parent. The scrambled
    source inherited from an unresolved placeholder and passed a parameter as
    the warning category; restored to the imported class and `FutureWarning`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args, **kwargs)
| 49 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Decoding strategies the MGP-STR processor can run and ensemble.

    Restored: the scrambled source inherited from an unresolved placeholder
    (`ExplicitEnum` is imported above), named every member identically, and
    named the class `A_` although it is referenced as `DecodeType` below.
    """

    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""
# All decoding strategies the processor runs per batch before ensembling.
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_(ProcessorMixin):
    r"""
    MGP-STR processor: wraps a ViT image processor plus three tokenizers
    (character, BPE, wordpiece) and ensembles their decoded predictions.

    NOTE(review): base restored to the imported `ProcessorMixin`; the scrambled
    source also had duplicate parameter names (SyntaxErrors), dropped `self.`
    targets, and gave every method the same name although they are called as
    `_decode_helper` / `char_decode` / `bpe_decode` / `wp_decode`.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = """ViTImageProcessor"""
    char_tokenizer_class = """MgpstrTokenizer"""

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')
        # fall back to the deprecated kwarg when no image processor was given
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare `images` and/or `text`; when both are given the char token ids
        are attached as ``labels``."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, sequences):
        """Decode char/BPE/wordpiece logits and keep, per sample, the string with
        the highest confidence."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits into strings plus cumulative-softmax
        confidence scores, truncating at that head's EOS marker."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'Format {format} is not supported.')
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # argmax per position; drop the first (BOS) position
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # confidence = product of per-step max probabilities up to EOS
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Character-tokenizer decode with intra-word spaces stripped."""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Wordpiece-tokenizer decode with intra-word spaces stripped."""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 669 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCamelCase__ (ABC):
    """Abstract base class for constraints applied during constrained generation.

    Subclasses implement `advance`, `does_advance`, `update`, `reset`,
    `remaining` and `copy`; `test()` exercises that contract once at init time.
    Restored: the scrambled source inherited from an unresolved placeholder
    (`ABC` is imported above), dropped the loop-variable targets in `test()`,
    and gave every method the same name.
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Drive the constraint to completion once to verify it is well defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."""
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 1_00_00:
                raise Exception("""update() does not fulfill the constraint.""")
        if self.remaining() != 0:
            raise Exception("""Custom Constraint is not defined correctly.""")

    @abstractmethod
    def advance(self):
        """Return token(s) that would make progress toward fulfilment."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def does_advance(self, token_id):
        """Whether `token_id` makes incremental progress."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def update(self, token_id):
        """Consume `token_id`; return (stepped, completed, reset)."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def reset(self):
        """Forget all progress."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def remaining(self):
        """Number of steps still required to fulfil the constraint."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; when `stateful`, progress is copied too."""
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
class UpperCamelCase__ (a):
    """Constraint requiring an exact token sequence to appear in the output.

    NOTE(review): the base class name `a` is unresolved in this scrambled file —
    presumably the `Constraint` ABC defined above; confirm. The instance
    attribute assignments and method names below were restored (the scrambled
    source dropped every `self.` target, so `token_ids` / `seqlen` /
    `fulfilled_idx` / `completed` never existed).
    """

    def __init__(self, token_ids):
        # skip the ABC's __init__ (which would run self.test() before state exists)
        super(a, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''')
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''')
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Next token needed, or None once complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''')
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        """Consume `token_id`; reset on mismatch. Returns (stepped, completed, reset)."""
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''')
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        # self.__class__ instead of the unbound name `PhrasalConstraint`
        # the scrambled source referenced
        new_constraint = self.__class__(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class UpperCamelCase__ :
    """Prefix trie over several candidate token-id sequences.

    Used by the disjunctive constraint to know which tokens may follow a
    partial sequence. Restored: the scrambled source dropped the `self.` and
    `level[token_id]` assignment targets, computed `max_height` over the wrong
    lengths, and renamed the methods called as `next_tokens` / `reached_leaf`
    / `count_leaves` / `has_subsets`.
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max(len(one) for one in nested_token_ids)
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for token_id in token_ids:
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        # a sequence that is a strict prefix of another would make completion ambiguous
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                """Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
                F''' {nested_token_ids}.''')
        self.trie = root

    def next_tokens(self, current_seq):
        """Tokens that may legally follow `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """True when `current_seq` completes one of the candidate sequences."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum(self.count_leaves(nn) for nn in next_nodes)

    def has_subsets(self, trie, nested_token_ids):
        """True iff some sequence is a complete subset (prefix) of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class UpperCamelCase__ (a):
    """Constraint fulfilled by ANY one of several token sequences (via a trie).

    NOTE(review): base class name `a` is unresolved in this scrambled file —
    presumably the `Constraint` ABC above; confirm. `DisjunctiveTrie` below is
    the sibling trie class (also name-scrambled in this file). The dropped
    `self.` targets and method names were restored.
    """

    def __init__(self, nested_token_ids):
        # skip the ABC's __init__ (which would run self.test() before state exists)
        super(a, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''')
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''')
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''')
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """All tokens that could extend the current partial match, or None."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        """Consume `token_id`; reset on mismatch. Returns (stepped, completed, reset)."""
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        # self.__class__ instead of the unbound name the scrambled source used
        new_constraint = self.__class__(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ):
lowerCamelCase__ = constraints
# max # of steps required to fulfill a given constraint
lowerCamelCase__ = max([c.seqlen for c in constraints] )
lowerCamelCase__ = len(_lowerCAmelCase )
lowerCamelCase__ = False
self.init_state()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = []
lowerCamelCase__ = None
lowerCamelCase__ = [constraint.copy(stateful=_lowerCAmelCase ) for constraint in self.constraints]
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self ):
lowerCamelCase__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCamelCase__ = constraint.advance()
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
token_list.append(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
token_list.extend(_lowerCAmelCase )
else:
lowerCamelCase__ = self.inprogress_constraint.advance()
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
token_list.append(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
token_list.extend(_lowerCAmelCase )
if len(_lowerCAmelCase ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCamelCase__ , lowerCamelCase__ = self.add(_lowerCAmelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowerCamelCase__ , lowerCamelCase__ = False, False
if self.completed:
lowerCamelCase__ = True
lowerCamelCase__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.inprogress_constraint.update(_lowerCAmelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCAmelCase ) )
lowerCamelCase__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCamelCase__ = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCamelCase__ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = pending_constraint.update(_lowerCAmelCase )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(_lowerCAmelCase )
lowerCamelCase__ = None
if not complete and stepped:
lowerCamelCase__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCamelCase__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowerCamelCase__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_(self, stateful=True):
    """Return a copy of this constraint-list state.

    Args:
        stateful: when True, completed/in-progress constraints keep their
            fulfillment progress in the copy; when False the copy restarts
            every constraint from its initial state.
    """
    # Fresh state over the same constraint objects; self.constraints is
    # never mutated during stepping, so it is safe to share.
    new_state = ConstraintListState(self.constraints)

    if stateful:
        new_state.complete_constraints = [
            constraint.copy(stateful=True) for constraint in self.complete_constraints
        ]
        if self.inprogress_constraint is not None:
            new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
        new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

    return new_state
| 50 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Module-level logger used by the conversion routine below.
logger = logging.get_logger(__name__)

# Map each convertible slow-tokenizer class name to its fast counterpart,
# e.g. "BertTokenizer" -> transformers.BertTokenizerFast.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase__(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow-tokenizer checkpoints and re-save them as fast ``tokenizer.json`` files.

    Args:
        tokenizer_name: name of a single tokenizer class to convert
            (e.g. "BertTokenizer"), or None to convert every class in
            TOKENIZER_CLASSES.
        checkpoint_name: a single checkpoint to convert, or None for all
            canonical checkpoints of each tokenizer class.
        dump_path: directory where the generated fast tokenizer files go.
        force_download: when True, re-download checkpoints instead of using
            the local cache.

    Raises:
        ValueError: if tokenizer_name is given but not a known class.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known for this class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # The vocab file lives in a sub-directory named after the
                    # checkpoint; mirror that layout and drop the file prefix.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f'=> File names {file_names}')

            # Keep only the fast "tokenizer.json" artifacts.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')


# Backward-compatible public name used by the CLI entry point below.
convert_slow_checkpoint_to_fast = UpperCamelCase__
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
'''simple docstring'''
import enum
import shutil
import sys
# Width of the current terminal; used below to clear/draw full-width lines.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# ANSI CSI final bytes for cursor movement, keyed by direction name.
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class lowerCAmelCase__(enum.Enum):
    """Vertical cursor-movement directions for the interactive menu."""

    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    """Write `content` (stringified) plus `end` to stdout and flush immediately."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end=""):
    """Write `content` wrapped in the ANSI escape sequence for `color` (an SGR code)."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
def reset_cursor():
    """Return the cursor to the start of the current line (carriage return)."""
    forceWrite('\r')
def move_cursor(num_lines, direction):
    """Move the cursor `num_lines` cells in `direction` ('up'/'down'/'left'/'right', any case)."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
def clear_line():
    """Blank out the current terminal line and return the cursor to its start."""
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()
def linebreak():
    """Draw a full-width horizontal rule starting at column 0."""
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
| 51 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Small SentencePiece fixture (no BOS token) used to build the test tokenizers.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(__UpperCamelCase, unittest.TestCase):
    """Tokenization tests for PegasusTokenizer / PegasusTokenizerFast.

    NOTE(review): class/method/local names were reconstructed from internal
    references; the dump had colliding placeholder names throughout.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        # Slow and fast tokenizers must agree on unknown/special-token handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(__UpperCamelCase, unittest.TestCase):
    """Tokenization tests for the BigBird-Pegasus variant (offset=0, [MASK] token).

    NOTE(review): class/method/local names were reconstructed from internal
    references; the dump had colliding placeholder names throughout.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1], )
| 669 | 0 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Sigmoid activation.

    Args:
        value: the input x — or, when deriv=True, an already-computed
            sigmoid output s, since sigmoid'(x) = s * (1 - s).
        deriv: when True, return the derivative expression instead.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value: learning-rate-like step size used by forward_propagation.
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the network output approaches `expected`.

    Args:
        expected: target value (as a percentage, 0-100).
        number_propagations: number of forward/backward passes to run.

    Returns:
        The final layer output scaled back to the 0-100 range.
    """
    # Random starting weight in [-99, 99] (odd values only).
    weight = float(2 * (random.randint(1, 1_00)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_1
        # Error delta (gradient of the sigmoid at the current output).
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 1_00
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive demo: train a single weight toward the expected value.
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
def UpperCamelCase__(density, bulk_modulus):
    """Return the speed of sound in a fluid via the Newton-Laplace equation.

    c = sqrt(bulk_modulus / density)

    Args:
        density: fluid density (kg/m^3); must be > 0.
        bulk_modulus: fluid bulk modulus (Pa); must be > 0.

    Raises:
        ValueError: if density or bulk_modulus is not positive.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazily-populated import structure: the tokenizer is only exposed when
# sentencepiece is installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def count_divisors(n):
    """Return the number of positive divisors of n (n >= 1).

    Uses trial-division prime factorisation: for n = p1^a1 * ... * pk^ak
    the divisor count is (a1 + 1) * ... * (ak + 1).
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Whatever remains is a single prime factor (multiplicity 1).
        n_divisors *= 2
    return n_divisors
def solution():
    """Return the first triangular number with more than 500 divisors
    (Project Euler problem 12)."""
    i = 1
    t_num = 1  # i-th triangular number, maintained incrementally
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict", [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ], )
def test_split_dict_to_yaml_list(split_dict):
    """A SplitDict must round-trip through its YAML-list serialization."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, asdict() must keep the (deprecated)
    # dataset_name field in the serialized split info.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 54 |
import numpy as np
class Cell:
    """A grid cell tracked by A*: position, parent link, and f = g + h scores."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None  # cell we arrived from; None until reached
        self.g = 0  # cost from the start cell
        self.h = 0  # heuristic estimate to the goal
        self.f = 0  # total score g + h

    def __eq__(self, cell):
        # Cells are identified purely by their grid coordinates.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """Rectangular grid; cells are addressed as (x, y) within world_size."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)  # visualisation buffer
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-connected neighbours of `cell` as new Cells."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from `start` to `goal` on `world` (a Gridworld).

    Returns:
        The list of (x, y) positions from start to goal, inclusive.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        # Expand the open cell with the smallest f score.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            # Skip neighbours already expanded. (The original compared each
            # closed cell but never skipped, re-queueing closed cells.)
            if any(c == n for c in _closed):
                continue

            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared-Euclidean heuristic.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            # Skip if a strictly better copy of this cell is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)

    # Walk parent links back from the goal to reconstruct the path.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()

    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)

    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)

    # Just for visual reasons: mark the found path on the grid and show it.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 669 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    """Builds tiny EsmFold configs/inputs for the model tests below.

    NOTE(review): parameter/attribute/method names reconstructed from the
    `self.*` reads and return statements; the dump collapsed them all to
    one placeholder.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Common-model-test suite for EsmForProteinFolding.

    NOTE(review): test-method names reconstructed from the skip reasons and
    the upstream ESMFold test file; the dump gave every method one colliding
    placeholder name, which unittest would never discover.
    """

    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """Slow end-to-end check against the published facebook/esmfold_v1 weights."""

    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        # First atom coordinate of the first residue, from a reference run.
        expected_slice = torch.tensor([2.58_28, 0.79_93, -10.93_34], dtype=torch.floataa)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1E-4))
| 55 |
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Return the Gregorian Easter date for `year` using Gauss's algorithm.

    Args:
        year: Gregorian calendar year.

    Returns:
        datetime.datetime of Easter Sunday (midnight).
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    # Two historical exceptions to the plain formula.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
    # Demo: report Easter for a few sample years, past and future.
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES (plus 2).
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all prime partitions of the input.

    Each way of writing `number_to_partition` as a sum of primes contributes
    the product of those primes to the returned set.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        # The empty partition contributes the empty product, 1.
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
def solution(number_unique_partitions: int = 5_0_0_0) -> int | None:
    """Return the smallest number with more than `number_unique_partitions`
    distinct prime-partition products, or None if none is found below
    NUM_PRIMES."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f'{solution() = }')
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_(__UpperCamelCase, __UpperCamelCase):
    """Variance-preserving SDE scheduler (score-based generative modelling),
    predictor step only.

    NOTE(review): parameter and method names reconstructed from the
    `self.config.*` reads and the error message; the dump had colliding
    placeholder names.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # Populated lazily; `set_timesteps` must run before stepping.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the descending continuous-time grid from 1 down to sampling_eps."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Run one reverse-SDE predictor step.

        Returns:
            (x, x_mean): the noised next sample and its noise-free mean.
        """
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 669 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the GLUE/MRPC training script below. The original
# source bound both values to the same name, silently shadowing the first one;
# each constant now has its own conventional name.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    """Build train/eval ``DataLoader``s for GLUE/MRPC tokenized with `model_name_or_path`.

    The original source gave all three parameters the same placeholder name
    (a SyntaxError) and referenced an undefined ``datasets`` variable; the function
    name is restored from its call site in the training function.

    Args:
        accelerator: the ``accelerate.Accelerator`` (consulted for TPU-specific padding).
        batch_size (int): training batch size; evaluation uses a fixed batch size of 32.
        model_name_or_path (str): tokenizer checkpoint to load.

    Returns:
        Tuple of ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=['idx', 'sentence1', 'sentence2'],
        load_from_cache_file=False,
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=1_2_8, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        # 32 == EVAL_BATCH_SIZE in the companion constants of this script.
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=32
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run one evaluation pass over `eval_dataloader` and return the accuracy.

    The original source declared four identically named parameters (a SyntaxError)
    while the body read ``accelerator``/``model``/``eval_dataloader``/``metric``;
    the parameter names are restored from those uses, and the function name from
    its call sites in the training function.

    Args:
        accelerator: the ``accelerate.Accelerator`` driving the run.
        model: the (prepared) sequence-classification model.
        eval_dataloader: dataloader over the evaluation split.
        metric: an ``evaluate`` metric exposing ``add_batch``/``compute``.

    Returns:
        The ``"accuracy"`` entry of the computed metric.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels'])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                # Drop the duplicated samples gather() padded onto the final batch.
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train (or resume) BERT on GLUE/MRPC with Accelerate/DeepSpeed, checkpointing per epoch.

    The original source declared two identically named parameters (a SyntaxError)
    and overwrote every local into a single placeholder name while the body read
    ``accelerator``/``args``/``optimizer``/``lr_scheduler``; names are restored
    from those reads and from the ``training_function`` call site in ``main``.

    Args:
        config: dict with ``"lr"``, ``"num_epochs"``, ``"seed"`` and ``"batch_size"``.
        args: parsed CLI namespace (``model_name_or_path``, ``output_dir``,
            ``resume_from_checkpoint``, ``partial_train_epoch``, ``num_epochs``).

    When resuming, the model is re-evaluated and the accuracy / learning rates /
    epoch are asserted against the recorded ``state_<epoch>.json``, then the
    function returns without further training.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer — DeepSpeed's dummy optimizer when the DS config provides one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler — DeepSpeed's dummy scheduler when the DS config provides one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Recover the epoch number from the checkpoint folder name ("epoch_<N>...").
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        # Checkpoint and record the state reached at the end of this epoch.
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    """Parse CLI arguments and launch the checkpointing training run.

    The ``__main__`` guard below calls ``main()``, which the original source
    never defined (the def carried a placeholder name) — restored here.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--resume_from_checkpoint',
        type=str,
        default=None,
        help='If the training should continue from a checkpoint folder.',
    )
    parser.add_argument(
        '--partial_train_epoch',
        type=int,
        default=None,
        help='If passed, the training will stop after this number of epochs.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=2,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config, args)


if __name__ == "__main__":
    main()
def hamming(n_element):
    """Return the first ``n_element`` Hamming numbers (numbers of the form 2^i * 3^j * 5^k).

    The name is restored from the call site in the script below, which invokes
    ``hamming(int(n))``.

    Args:
        n_element: how many Hamming numbers to generate (coerced with ``int``).

    Returns:
        The ascending list of the first ``n_element`` Hamming numbers.

    Raises:
        ValueError: if ``n_element`` is less than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError('a should be a positive number')

    hamming_list = [1]
    # i, j, k index the smallest element whose multiple by 2, 3, 5 respectively
    # still exceeds the largest number generated so far.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    # The original bound both script variables to one placeholder name while the
    # code below read `n` and `hamming_numbers`; names restored from those reads.
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 0 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _lowerCAmelCase :
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
raise NotImplementedError()
class _lowerCAmelCase ( _lowerCAmelCase ):
    """Streamer that prints decoded text to stdout as tokens arrive.

    The original inherited from an undefined placeholder name; it now derives
    from the base streamer class defined directly above. Method names are
    restored from the body's own calls (``self.on_finalized_text`` /
    ``self._is_chinese_char``) and from the base-class contract (``put``/``end``);
    the original also declared three identically named ``__init__`` parameters
    (a SyntaxError), whose real names follow from the attribute reads below.
    """

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs) -> Optional[Any]:
        """
        Args:
            tokenizer: tokenizer used to decode cached token ids.
            skip_prompt: when True, the first ``put`` call (the prompt) is ignored.
            **decode_kwargs: forwarded verbatim to ``tokenizer.decode``.
        """
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []          # token ids not yet flushed as text
        self.print_len = 0             # how many chars of the decoded cache were emitted
        self.next_tokens_are_prompt = True

    def put(self, value) -> Union[str, Any]:
        """Decode newly generated ids in `value` and emit any finished text.

        NOTE(review): `value` is assumed to be a tensor-like object with
        ``.shape``/``.tolist()`` (a torch tensor in practice) — confirm at callers.
        """
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )

        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(printable_text )

        self.on_finalized_text(printable_text )

    def end(self) -> Optional[Any]:
        """Flush whatever remains in the cache and mark the stream finished."""
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )

    def on_finalized_text(self, text, stream_end=False) -> List[str]:
        """Emit a finished chunk of text; subclasses override this hook."""
        print(text , flush=True , end="""""" if not stream_end else None )

    def _is_chinese_char(self, cp) -> List[Any]:
        """Return True if the code point `cp` falls in a CJK ideograph block."""
        if (
            (cp >= 0X4e_00 and cp <= 0X9f_ff)
            or (cp >= 0X34_00 and cp <= 0X4d_bf)  #
            or (cp >= 0X2_00_00 and cp <= 0X2_a6_df)  #
            or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f)  #
            or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f)  #
            or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af)  #
            or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
            or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f)  #
        ):  #
            return True
        return False
class _lowerCAmelCase ( _lowerCAmelCase ):
    """Streamer variant that exposes the generated text as a blocking iterator.

    The original inherited from an undefined placeholder name; at this point in
    the file ``_lowerCAmelCase`` is the printing text streamer defined directly
    above, which matches the ``super().__init__`` arity used here. The iterator
    method's real name (``__next__``) is restored from the ``__iter__``
    protocol, and the overridden hook name from the parent's
    ``self.on_finalized_text`` call.
    """

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs) -> List[str]:
        """
        Args:
            tokenizer: tokenizer used to decode cached token ids.
            skip_prompt: when True, the prompt tokens are not streamed.
            timeout: per-operation timeout (seconds) for the internal queue.
            **decode_kwargs: forwarded to ``tokenizer.decode``.
        """
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        # Sentinel pushed on stream end; consuming it raises StopIteration.
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False) -> List[Any]:
        """Instead of printing, enqueue the finished chunk for the consumer."""
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__(self) -> List[Any]:
        return self

    def __next__(self) -> Union[str, Any]:
        """Block until the next chunk is available; stop on the sentinel."""
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 58 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: List[Any] ):
__lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
__lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
__lowerCamelCase : Tuple = 100
self.assertEqual(kp.calc_profit(a , a , a ) , 210 )
def _snake_case ( self: str ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'Weight can not be negative.' )
def _snake_case ( self: Dict ):
self.assertRaisesRegex(a , 'Profit can not be negative.' )
def _snake_case ( self: List[str] ):
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def _snake_case ( self: Any ):
self.assertRaisesRegex(
a , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 669 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (transformers-style logging wrapper imported above).
logger = logging.get_logger(__name__)

# Pretrained SEW-D checkpoints -> config file URLs. The original source bound
# both this dict and the logger to the same name, silently shadowing the logger.
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''Configuration class for SEW-D speech models.

    The original inherited from an undefined placeholder name; the visible
    import of ``PretrainedConfig`` identifies the intended base. The original
    also gave every ``__init__`` parameter the same name (a SyntaxError) and
    assigned throwaway locals instead of ``self.`` attributes; parameter and
    attribute names are restored from the right-hand sides of those
    assignments, which survived obfuscation.
    '''

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        feature_layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> str:
        '''See the SEW-D documentation for the meaning of each hyper-parameter.'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                F"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
                F"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self) -> Optional[Any]:
        '''Overall stride of the feature extractor (product of all conv strides).'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 59 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    '''Builds tiny random LayoutLMv3 configs/inputs and checks TF model output shapes.

    The original source named this class with a placeholder while the test class
    below instantiates ``TFLayoutLMvaModelTester`` — restored. All seven methods
    shared one name (so only the last survived) and locals were assigned to a
    single placeholder while being read under their real names (e.g. ``bbox``);
    both are restored from those uses.
    '''

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        '''Create a small random config plus matching text/bbox/image inputs.'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        '''Check base-model output shapes for text+image, text-only and image-only inputs.'''
        model = TFLayoutLMvaModel(config=config )

        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        result = model({'pixel_values': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        '''Check sequence-classification head logits shape.'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        '''Check token-classification head logits shape.'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        '''Check question-answering head start/end logits shapes.'''
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common(self):
        '''Return (config, inputs_dict) in the shape the common test mixin expects.'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model-level tests for the TF LayoutLMv3 family.

    NOTE(review): the original base classes were the undefined placeholder
    `__UpperCamelCase`; TFModelTesterMixin and PipelineTesterMixin are the
    conventional mixins for a TF model test suite — confirm against this
    file's (out-of-view) import block.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped wholesale for this model.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Copy ``inputs_dict`` and, when requested, add the label tensors expected by ``model_class``.

        NOTE(review): the mapping constants below follow the standard TF test
        layout (multiple-choice / QA / sequence / token classification) —
        confirm they are imported at the top of this file.
        """
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Tile every tensor input along a new num_choices axis.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        # NOTE(review): config_class was the undefined `a`; LayoutLMvaConfig
        # is the matching config class — confirm it is imported.
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        """Check hf_compute_loss models produce a well-shaped loss for every input style."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        # -100 is the conventional "ignore" label index.
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Renamed from the mangled ``UpperCamelCase__`` to match the call site in
    the integration test (``prepare_img()``).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class A_(unittest.TestCase):
    """Integration test: run the pretrained TF LayoutLMv3 base model on a fixture image."""

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies token ids and boxes manually.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the shape of the final hidden states
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        # spot-check the first 3x3 block against reference values
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 669 | 0 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger (renamed from the mangled `lowerCAmelCase_`).
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """Build the (old_name, new_name) pairs mapping original TrOCR encoder
    weights onto the Hugging Face ViT encoder layout.

    Renamed from the mangled ``lowerCamelCase_`` to match its call site, and
    the duplicate ``_UpperCamelCase`` parameter names (a SyntaxError) were
    replaced with distinct names.

    Args:
        encoder_config: ViT config; only ``num_hidden_layers`` is read here.
        decoder_config: decoder config (unused here, kept for the caller's
            two-argument call signature).

    Returns:
        list[tuple[str, str]]: rename pairs for the encoder state dict.
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias')
        )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight')
        )
        rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias'))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
            ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
            ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
            ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
            ('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
            ('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
        ]
    )
    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """Split each layer's fused qkv projection weight into separate
    query/key/value entries, in place.

    The mangled version popped the fused weight and assigned the slices to
    discarded locals, silently dropping all attention weights; the slices
    are now written back under the Hugging Face key names.

    Args:
        state_dict: checkpoint state dict, mutated in place.
        encoder_config: ViT config; ``num_hidden_layers`` and ``hidden_size``
            are read.
    """
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')

        # fused weight is stacked [q; k; v] along dim 0
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    The mangled version popped the value but assigned it to a discarded
    local, so the entry was deleted instead of renamed; it is now re-inserted
    under the new key. Also fixes the duplicate parameter names
    (``_UpperCamelCase`` three times), which are a SyntaxError.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """Download a sample image matching the checkpoint's domain.

    Renamed from the mangled ``lowerCamelCase_`` to match the call site in
    the conversion function. Handwritten checkpoints get an IAM line image;
    printed/stage1 checkpoints get a SROIE receipt.

    NOTE(review): if ``checkpoint_url`` matches neither branch, ``url`` is
    unbound and a NameError is raised — behavior inherited from the original.
    """
    if "handwritten" in checkpoint_url:
        url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
    im = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert an original TrOCR fairseq checkpoint to a Hugging Face
    VisionEncoderDecoderModel and save model + processor.

    Renamed from the mangled ``lowerCamelCase_`` to match the ``__main__``
    call site; the locals (all collapsed onto ``snake_case_`` and therefore
    undefined where referenced) have been restored.

    Args:
        checkpoint_url: URL of the original ``.pt`` checkpoint.
        pytorch_dump_folder_path: output folder for the converted model.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_024
        encoder_config.intermediate_size = 4_096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1_024
    else:
        raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL')

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = 'relu'
        decoder_config.max_position_embeddings = 1_024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', check_hash=True)['model']

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('decoder') and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='pt').pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50_265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    # stage1 checkpoints have no reference logits to compare against
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving processor to {pytorch_dump_folder_path}')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL and output folder, then convert.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 60 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for CLIPTokenizer / CLIPTokenizerFast.

    The mangled version named every method ``_snake_case`` (so they shadowed
    each other and unittest discovered none of them) and renamed the class
    attributes the mixin and the bodies read (``tokenizer_class`` etc.);
    both have been restored.
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')

        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 669 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Simulate a CUDA OOM by raising the RuntimeError accelerate looks for.

    Renamed from the mangled ``_A`` to match the call sites in the tests below.
    """
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    """Tiny 3->4->5 network used by the memory tests.

    Fixes from the mangled version: the class is named ``ModelForTest`` (as
    referenced in ``test_release_memory``), the two linear layers no longer
    overwrite the same ``lineara`` attribute, ``nn.BatchNormad`` is the real
    ``nn.BatchNorm1d``, and the method is ``forward`` so ``nn.Module.__call__``
    dispatches to it.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        # linear -> batchnorm -> linear
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    """Tests for accelerate's ``find_executable_batch_size`` and ``release_memory``.

    The mangled version named every method ``a`` (they shadowed each other and
    unittest would discover none of them) and referenced undefined locals
    (``batch_sizes``, ``batch_size``, ``bs``, exception classes); restored.
    """

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        # batch size should halve on each fake OOM until it reaches 8
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit_with_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        # Passing batch_size explicitly must be rejected by the decorator.
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        # Non-OOM errors must propagate unchanged.
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 61 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# True when running inside Google Colab; the menu then falls back to plain
# input() instead of raw-keypress navigation. (Renamed from the mangled
# `lowercase_` to `in_colab`, the name the menu class actually reads.)
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class A_:
    """Terminal bullet menu: navigate with arrow/number keys, confirm with enter.

    Keypress handlers are registered through the sibling ``input`` module's
    ``@input.register`` / ``@input.mark`` machinery, which also provides
    ``self.handle_input`` and ``self.current_selection``. The mangled version
    had duplicate parameter names (a SyntaxError) and every method named
    ``_snake_case`` (so only the last keymap handler survived); restored.
    """

    def __init__(self, prompt: str = None, choices: list = None):
        self.position = 0
        # Avoid the shared-mutable-default pitfall; None means "no choices".
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index, end: str = ""):
        """Write the label of choice ``index`` (green where the terminal allows)."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print the row for ``index``, with the arrow marker when selected."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ')
            self.write_choice(index)
        else:
            forceWrite(F'    {self.choices[index]}')
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the selection ``num_spaces`` rows in ``direction``, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump straight to the row matching the digit key that was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu, block until a choice is made, and return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab cannot capture raw keypresses, so read a typed index.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the menu before echoing the final selection.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 669 | 0 |
def lowerCamelCase__(lowercase):
    """Return the largest number obtainable by deleting exactly one digit of ``abs(lowercase)``.

    >>> lowerCamelCase__(1234)
    234
    >>> lowerCamelCase__(-270)
    70

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: for single-digit inputs (removing the only digit leaves
            an empty string, inherited from the original behavior).
    """
    if not isinstance(lowercase, int):
        raise TypeError("only integers accepted as input")
    digits = str(abs(lowercase))
    # One candidate per position, each with that position's digit removed.
    candidates = [digits[:index] + digits[index + 1 :] for index in range(len(digits))]
    return max(int(candidate) for candidate in candidates)
# Run this module's doctests when it is executed directly as a script.
if __name__ == "__main__":
    __import__("""doctest""").testmod()
| 62 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = (CMStochasticIterativeScheduler,)
__snake_case = 10
def _snake_case ( self: Any , **a: Dict ):
__lowerCamelCase : Optional[Any] = {
'num_train_timesteps': 201,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
config.update(**a )
return config
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Any = 10
__lowerCamelCase : Any = self.get_scheduler_config()
__lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**a )
scheduler.set_timesteps(a )
__lowerCamelCase : Any = scheduler.timesteps[0]
__lowerCamelCase : List[str] = scheduler.timesteps[1]
__lowerCamelCase : Union[str, Any] = self.dummy_sample
__lowerCamelCase : int = 0.1 * sample
__lowerCamelCase : Optional[Any] = scheduler.step(a , a , a ).prev_sample
__lowerCamelCase : List[str] = scheduler.step(a , a , a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self: Optional[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a )
def _snake_case ( self: List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : Tuple = self.scheduler_classes[0]
__lowerCamelCase : Tuple = self.get_scheduler_config()
__lowerCamelCase : Tuple = scheduler_class(**a )
__lowerCamelCase : int = 1
scheduler.set_timesteps(a )
__lowerCamelCase : Optional[int] = scheduler.timesteps
__lowerCamelCase : List[str] = torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] = self.dummy_model()
__lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a ):
# 1. scale model input
__lowerCamelCase : List[str] = scheduler.scale_model_input(a , a )
# 2. predict noise residual
__lowerCamelCase : Optional[int] = model(a , a )
# 3. predict previous sample x_t-1
__lowerCamelCase : str = scheduler.step(a , a , a , generator=a ).prev_sample
__lowerCamelCase : str = pred_prev_sample
__lowerCamelCase : List[str] = torch.sum(torch.abs(a ) )
__lowerCamelCase : str = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Any = self.scheduler_classes[0]
__lowerCamelCase : Optional[Any] = self.get_scheduler_config()
__lowerCamelCase : int = scheduler_class(**a )
__lowerCamelCase : List[Any] = [106, 0]
scheduler.set_timesteps(timesteps=a )
__lowerCamelCase : Dict = scheduler.timesteps
__lowerCamelCase : int = torch.manual_seed(0 )
__lowerCamelCase : Any = self.dummy_model()
__lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__lowerCamelCase : Tuple = scheduler.scale_model_input(a , a )
# 2. predict noise residual
__lowerCamelCase : Tuple = model(a , a )
# 3. predict previous sample x_t-1
__lowerCamelCase : Any = scheduler.step(a , a , a , generator=a ).prev_sample
__lowerCamelCase : Any = pred_prev_sample
__lowerCamelCase : Dict = torch.sum(torch.abs(a ) )
__lowerCamelCase : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3
def _snake_case ( self: Tuple ):
    """`set_timesteps` must reject a custom schedule that is not strictly descending."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [39, 30, 12, 15, 0]  # 12 -> 15 breaks the descending order
    # NOTE(review): the expected exception type was mangled to the undefined
    # name `a`; ValueError matches the diffusers scheduler contract — confirm.
    with self.assertRaises(ValueError, msg='`timesteps` must be in descending order.'):
        scheduler.set_timesteps(timesteps=timesteps)
def _snake_case ( self: int ):
    """Passing both `num_inference_steps` and `timesteps` must raise."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [39, 30, 12, 1, 0]
    num_inference_steps = len(timesteps)
    # NOTE(review): exception type was mangled away; ValueError assumed — confirm.
    with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
        scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def _snake_case ( self: Optional[Any] ):
    """A custom schedule starting at `num_train_timesteps` is out of range and must raise."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    timesteps = [scheduler.config.num_train_timesteps]
    with self.assertRaises(
        ValueError,
        # Fixed: the message was a plain string with an unbalanced `}}`; it is
        # meant to interpolate the configured training-step count.
        msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}',
    ):
        scheduler.set_timesteps(timesteps=timesteps)
| 669 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs and inputs for the TF model tests.

    NOTE(review): renamed from the mangled `a` — the test class below
    instantiates `TFResNetModelTester(self)`, which previously did not
    resolve. `__init__` also declared every parameter with the same name
    (a SyntaxError) and assigned locals instead of `self.*`; parameter
    names are restored from how the attributes are read elsewhere.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by the hidden-states test.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a single forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester suite for TF ResNet.

    NOTE(review): reconstructed from mangled names — the class was `a`, the
    mixin bases were the undefined `lowercase__` (restored from the imports
    at the top of the file), and every method was `UpperCAmelCase`, so each
    later def silently replaced the previous one.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # ResNet is a pure conv net: no attention, no token embeddings.
    # NOTE(review): these five flags were all mangled to `a = False`; the
    # names follow the upstream TF ResNet test — confirm against history.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration test.

    NOTE(review): renamed from the mangled `lowerCamelCase__` so the
    `prepare_img()` call in the integration test resolves.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained TF ResNet checkpoint on a
    real image and compare the logits against recorded values.

    NOTE(review): class and method names reconstructed from the mangled
    `a`/`UpperCAmelCase` placeholders; `self.default_image_processor` below
    grounds the property name.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1_069, -9.7_877, -8.3_777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 63 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowercase_ = input('Enter image url: ').strip()
print(F"""Downloading image from {url} ...""")
lowercase_ = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
lowercase_ = soup.find('meta', {'property': 'og:image'})['content']
lowercase_ = requests.get(image_url).content
lowercase_ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    """Builds tiny BioGPT configs/inputs and runs per-feature checks.

    NOTE(review): reconstructed from the mangled original — `__init__`
    assigned every argument to a local `SCREAMING_SNAKE_CASE__` instead of
    `self.*`, and all methods shared the name `UpperCamelCase_`. Method
    names are grounded by the call sites in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels) — unused pieces are None."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the original passed `is_decoder=<mangled name>`;
        # False matches the upstream BioGPT tester — confirm.
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)['''last_hidden_state''']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            '''last_hidden_state'''
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        # Expected std of the residual projections under scaled init.
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester suite for BioGPT.

    NOTE(review): reconstructed — the class was `_lowerCamelCase` with
    undefined base `UpperCamelCase_`, class attrs were all `__a`, and every
    method was `UpperCamelCase_` (each def overwrote the previous).
    """

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')

        tokenizer.padding_side = '''left'''

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]

        inputs = tokenizer(sentences, return_tensors='''pt''', padding=True)
        input_ids = inputs['''input_ids'''].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs['''attention_mask'''].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors='''pt''').input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='''pt''').input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the real `microsoft/biogpt` checkpoint.

    NOTE(review): class/method names and local bindings reconstructed from
    the mangled placeholders; runtime strings kept byte-for-byte.
    """

    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer('''COVID-19 is''', return_tensors='''pt''').to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(output_str, expected_output_str)
| 64 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Fixed: every result below was bound to the same mangled name `lowercase_`
# while later lines referenced `filepaths`, `upper_files`, etc.; restored
# the bindings so the checks actually run.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

# Files whose names contain spaces.
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

# Files sitting at the repository root (no directory component).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

# Exit non-zero (with the offending count) so CI fails on violations.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 669 | 0 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__UpperCAmelCase = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class __lowercase:
    """A version parsed from a ``"major.minor.patch"`` string, ordered and
    hashable by its numeric tuple.

    NOTE(review): reconstructed — the original declared five fields all
    named `snake_case_` without annotations (so the dataclass had no fields
    at all) and four methods all named `__lowercase` (only the last
    survived). Field and method names are restored from their uses:
    ``self.version_str``, ``self.tuple``, ``self._validate_operand``.
    """

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        # Parse eagerly so repr/compare/hash can rely on the numeric parts.
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        # Accept either a raw version string or another instance.
        if isinstance(other, str):
            return type(self)(other)
        elif isinstance(other, type(self)):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """Build an instance from a dict, ignoring unknown keys."""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        # NOTE(review): method name follows the upstream `datasets` Version
        # class — confirm; the mangled original gave no name to ground it.
        return self.version_str
def lowerCAmelCase ( __UpperCamelCase ):
    '''Parse an "x.y.z" version string into a tuple of three ints.

    Raises ValueError when the string does not match the x.y.z format.
    '''
    # Compile locally so the function does not depend on the (obfuscated,
    # otherwise-named) module-level pattern; re.compile caches, so it's cheap.
    version_reg = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$' )
    res = version_reg.match(__UpperCamelCase )
    if not res:
        # Bug fix: the original message interpolated the undefined name
        # `version_str`; report the actual argument instead.
        raise ValueError(F"Invalid version '{__UpperCamelCase}'. Format should be x.y.z with {{x,y,z}} being digits." )
    # Bug fix: convert each captured group (the original called
    # int() on the whole input string and ignored the loop variable).
    return tuple(int(v ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def lowerCAmelCase ( __UpperCamelCase ):
    '''Join an iterable of version components back into a dotted "x.y.z" string.'''
    # Bug fix: iterate the argument and stringify each component — the
    # original iterated an undefined name `version_tuple` and stringified
    # the whole argument instead of the loop variable.
    return ".".join(str(v ) for v in __UpperCamelCase )
| 65 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both constants below share the obfuscated name `lowercase_`,
# so the URL map overwrites the logger — confirm against the original module.
lowercase_ = logging.get_logger(__name__)

# Map of canonical XLM-RoBERTa checkpoint names to their hosted config files.
lowercase_ = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( __UpperCamelCase ):
    '''Configuration for an XLM-RoBERTa model (mirrors `XLMRobertaConfig`).

    Bug fix: the original `__init__` declared every parameter with the same
    name `a` (a SyntaxError) and assigned every value to one local variable
    instead of instance attributes. Parameter names and `self.*` assignments
    are restored from the defaults visible in the original signature.
    '''

    __snake_case = """xlm-roberta"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Forward the special-token ids (and any extra kwargs) to the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( __UpperCamelCase ):
    '''ONNX export configuration: declares the dynamic axes of the model inputs.'''

    @property
    def _snake_case ( self: Optional[Any] ):
        # Multiple-choice inputs carry an extra per-choice dimension.
        # Bug fix: the original assigned the axis map to a throwaway local
        # (`__lowerCamelCase`) while the OrderedDict below read the undefined
        # name `dynamic_axis`.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 0 |
from __future__ import annotations
from math import pi, sqrt
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
    # Run any doctest examples when executed as a script.
    import doctest

    doctest.testmod()
| 66 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()  # presumably forces deterministic torch ops for reproducible outputs — see diffusers.utils.testing_utils
class A_ ( __UpperCamelCase , unittest.TestCase ):
    '''Fast CPU tests for `ConsistencyModelPipeline` using tiny dummy UNets.

    NOTE(review): throughout this class the obfuscation bound every local to
    `__lowerCamelCase` while the following statements read the original names
    (`unet`, `scheduler`, `components`, `pipe`, `image`, ...); those reads are
    undefined as written. Likewise the test methods below all share the name
    `_snake_case`, so later definitions shadow earlier ones.
    '''
    __snake_case = ConsistencyModelPipeline
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    @property
    def _snake_case ( self: str ):
        # Tiny unconditional UNet checkpoint used by most fast tests.
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def _snake_case ( self: Tuple ):
        # Class-conditional variant of the dummy UNet.
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def _snake_case ( self: int , a: str=False ):
        # Assemble pipeline components; `a` selects the class-conditional UNet.
        if class_cond:
            __lowerCamelCase : str = self.dummy_cond_unet
        else:
            __lowerCamelCase : str = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def _snake_case ( self: int , a: List[str] , a: Any=0 ):
        # Build deterministic call kwargs; mps needs a CPU-seeded generator.
        if str(a ).startswith('mps' ):
            __lowerCamelCase : List[Any] = torch.manual_seed(a )
        else:
            __lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _snake_case ( self: Optional[Any] ):
        # Multistep sampling, unconditional: compare a 3x3 output slice.
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components()
        __lowerCamelCase : str = ConsistencyModelPipeline(**a )
        __lowerCamelCase : str = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Any = self.get_dummy_inputs(a )
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # Multistep sampling, class-conditional (class label 0).
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
        __lowerCamelCase : Tuple = 0
        __lowerCamelCase : List[str] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # Single-step (one-shot) sampling, unconditional.
        __lowerCamelCase : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[int] = self.get_dummy_components()
        __lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Tuple = self.get_dummy_inputs(a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Any = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: List[str] ):
        # Single-step sampling, class-conditional.
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    '''GPU integration tests for `ConsistencyModelPipeline` against the real
    `diffusers/consistency_models` checkpoints.

    NOTE(review): as in the fast-test class above, locals are bound to
    `__lowerCamelCase` while later statements read the original names, and the
    test methods all share the name `_snake_case` (obfuscation artifacts).
    '''
    def _snake_case ( self: Any ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # Build call kwargs; optionally pin fixed latents for exact comparisons.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs
    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # Deterministic latent tensor for a given seed/device/dtype/shape.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents
    def _snake_case ( self: str ):
        # Multistep sampling against the ImageNet-64 L2 checkpoint.
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _snake_case ( self: Optional[int] ):
        # Single-step sampling against the same checkpoint.
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case ( self: List[str] ):
        # Multistep sampling in fp16 with torch 2.0 flash attention and fixed latents.
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case ( self: Dict ):
        # Single-step sampling in fp16 with flash attention and fixed latents.
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A_ :
    """Helper that builds tiny MegatronBERT configs/inputs and checks each head.

    NOTE(review): the obfuscation declared every `__init__`/method parameter
    with the same name `__A` (a SyntaxError as written) and bound every local
    to `_lowercase` while later statements read the original names
    (`parent`, `config`, `model`, `result`, ...); those reads are undefined
    in this view — to confirm against the original test module.
    """
    def __init__( self : List[str] ,__A : List[str] ,__A : List[str]=13 ,__A : Dict=7 ,__A : int=True ,__A : Tuple=True ,__A : Tuple=True ,__A : List[Any]=True ,__A : Any=99 ,__A : Union[str, Any]=64 ,__A : Dict=32 ,__A : Any=5 ,__A : List[str]=4 ,__A : Optional[int]=37 ,__A : Optional[int]="gelu" ,__A : Any=0.1 ,__A : str=0.1 ,__A : str=512 ,__A : List[str]=16 ,__A : List[str]=2 ,__A : List[Any]=0.02 ,__A : Optional[int]=3 ,__A : Optional[Any]=4 ,__A : Optional[Any]=None ,) -> Any:
        # Record the (tiny) model hyper-parameters used by every check below.
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = seq_length
        _lowercase = is_training
        _lowercase = use_input_mask
        _lowercase = use_token_type_ids
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = embedding_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = intermediate_size
        _lowercase = hidden_act
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = max_position_embeddings
        _lowercase = type_vocab_size
        _lowercase = type_sequence_label_size
        _lowercase = initializer_range
        _lowercase = num_labels
        _lowercase = num_choices
        _lowercase = scope
    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # Build random ids/masks/labels plus a config for one forward pass.
        _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        _lowercase = None
        if self.use_input_mask:
            _lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase = None
        if self.use_token_type_ids:
            _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        _lowercase = None
        _lowercase = None
        _lowercase = None
        if self.use_labels:
            _lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            _lowercase = ids_tensor([self.batch_size] ,self.num_choices )
        _lowercase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Tiny MegatronBertConfig mirroring the recorded hyper-parameters.
        return MegatronBertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__A ,initializer_range=self.initializer_range ,)
    def __UpperCAmelCase ( self : Union[str, Any] ,__A : Optional[Any] ,__A : Any ,__A : Optional[Any] ,__A : Any ,__A : str ,__A : Tuple ,__A : Optional[int] ) -> str:
        # Base model: check last_hidden_state and pooler_output shapes.
        _lowercase = MegatronBertModel(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A )
        _lowercase = model(__A ,token_type_ids=__A )
        _lowercase = model(__A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def __UpperCAmelCase ( self : Tuple ,__A : Tuple ,__A : Tuple ,__A : Tuple ,__A : Dict ,__A : Optional[int] ,__A : Dict ,__A : Any ) -> int:
        # Masked-LM head: logits over the vocabulary per position.
        _lowercase = MegatronBertForMaskedLM(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A ,labels=__A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCAmelCase ( self : Tuple ,__A : Any ,__A : Union[str, Any] ,__A : Any ,__A : Union[str, Any] ,__A : Union[str, Any] ,__A : int ,__A : Tuple ) -> Any:
        # Causal-LM head: same logits shape as masked LM.
        _lowercase = MegatronBertForCausalLM(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A ,labels=__A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCAmelCase ( self : Tuple ,__A : Tuple ,__A : Dict ,__A : Dict ,__A : int ,__A : Optional[Any] ,__A : Tuple ,__A : str ) -> Tuple:
        # Next-sentence-prediction head: binary logits per example.
        _lowercase = MegatronBertForNextSentencePrediction(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            __A ,attention_mask=__A ,token_type_ids=__A ,labels=__A ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
    def __UpperCAmelCase ( self : List[Any] ,__A : List[str] ,__A : Optional[int] ,__A : int ,__A : Optional[int] ,__A : Optional[Any] ,__A : Tuple ,__A : List[str] ) -> Optional[int]:
        # Pretraining head: MLM logits plus seq-relationship logits.
        _lowercase = MegatronBertForPreTraining(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            __A ,attention_mask=__A ,token_type_ids=__A ,labels=__A ,next_sentence_label=__A ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
    def __UpperCAmelCase ( self : int ,__A : Union[str, Any] ,__A : Optional[int] ,__A : Dict ,__A : List[Any] ,__A : int ,__A : Tuple ,__A : str ) -> Dict:
        # QA head: start/end logits per position.
        _lowercase = MegatronBertForQuestionAnswering(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            __A ,attention_mask=__A ,token_type_ids=__A ,start_positions=__A ,end_positions=__A ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def __UpperCAmelCase ( self : List[str] ,__A : Optional[int] ,__A : Tuple ,__A : int ,__A : Any ,__A : int ,__A : int ,__A : str ) -> Any:
        # Sequence-classification head: one logit vector per example.
        _lowercase = self.num_labels
        _lowercase = MegatronBertForSequenceClassification(__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A ,labels=__A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def __UpperCAmelCase ( self : int ,__A : List[str] ,__A : str ,__A : Any ,__A : Union[str, Any] ,__A : Optional[int] ,__A : List[str] ,__A : List[Any] ) -> Optional[int]:
        # Token-classification head: per-token label logits.
        _lowercase = self.num_labels
        _lowercase = MegatronBertForTokenClassification(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A ,labels=__A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCAmelCase ( self : List[str] ,__A : List[Any] ,__A : List[Any] ,__A : Any ,__A : Optional[Any] ,__A : Dict ,__A : str ,__A : List[Any] ) -> Optional[Any]:
        # Multiple-choice head: inputs are expanded with a choice dimension.
        _lowercase = self.num_choices
        _lowercase = MegatronBertForMultipleChoice(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        _lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        _lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        _lowercase = model(
            __A ,attention_mask=__A ,token_type_ids=__A ,labels=__A ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def __UpperCAmelCase ( self : Any ) -> int:
        # Repackage prepare_config_and_inputs() into the common kwargs dict.
        _lowercase = self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) = config_and_inputs
        _lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common-suite tests for every MegatronBERT model class.

    NOTE(review): the class attributes below all share the obfuscated name
    `SCREAMING_SNAKE_CASE_`, so later assignments overwrite earlier ones;
    the method-local bindings likewise all target `_lowercase` while later
    statements read the original names (obfuscation artifact).
    """
    SCREAMING_SNAKE_CASE_ : str = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE_ : Any = (
        {
            '''feature-extraction''': MegatronBertModel,
            '''fill-mask''': MegatronBertForMaskedLM,
            '''question-answering''': MegatronBertForQuestionAnswering,
            '''text-classification''': MegatronBertForSequenceClassification,
            '''text-generation''': MegatronBertForCausalLM,
            '''token-classification''': MegatronBertForTokenClassification,
            '''zero-shot''': MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = True
    # test_resize_embeddings = False
    SCREAMING_SNAKE_CASE_ : str = False
    def __UpperCAmelCase ( self : List[Any] ,__A : Dict ,__A : Dict ,__A : List[str]=False ) -> Optional[Any]:
        # Pretraining models need dummy label tensors added to the inputs.
        _lowercase = super()._prepare_for_class(__A ,__A ,return_labels=__A )
        if return_labels:
            if model_class in get_values(__A ):
                _lowercase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=__A )
                _lowercase = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=__A )
        return inputs_dict
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        _lowercase = MegatronBertModelTester(self )
        _lowercase = ConfigTester(self ,config_class=__A ,hidden_size=37 )
    def __UpperCAmelCase ( self : Dict ) -> str:
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*__A )
    def __UpperCAmelCase ( self : int ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__A )
    def __UpperCAmelCase ( self : List[str] ) -> int:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__A )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__A )
    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*__A )
    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*__A )
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__A )
    def __UpperCAmelCase ( self : List[Any] ) -> int:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*__A )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] ) -> Optional[Any]:
    """Wrap a nested list of token ids in a long tensor on the test device.

    Bug fix: the original passed the data itself as ``device=`` (which would
    raise at call time); the tensor belongs on the module-level
    ``torch_device`` imported from transformers.testing_utils.
    """
    return torch.tensor(
        snake_case__ , dtype=torch.long , device=torch_device , )
snake_case = 1e-4  # float-comparison tolerance used by the integration test below
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
    """Integration test comparing real checkpoint outputs against golden values.

    NOTE(review): method locals are all bound to `_lowercase` while later
    statements read the original names (`model`, `output`, `a`, `b`, ...) —
    obfuscation artifact; confirm against the original test module.
    """
    @slow
    @unittest.skip('Model is not available.' )
    def __UpperCAmelCase ( self : Any ) -> Tuple:
        # Load the 345M uncased checkpoint (optionally from a local $MYDIR copy),
        # run one fp16 forward pass, and compare a 3x3 corner of the output.
        _lowercase = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            _lowercase = os.path.join(os.environ['MYDIR'] ,__A )
        _lowercase = MegatronBertModel.from_pretrained(__A )
        model.to(__A )
        model.half()
        _lowercase = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            _lowercase = model(__A )[0]
        _lowercase = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape ,__A )
        _lowercase = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                _lowercase = output[0, ii, jj]
                _lowercase = expected[3 * ii + jj]
                _lowercase = 'ii={} jj={} a={} b={}'.format(__A ,__A ,__A ,__A )
                self.assertTrue(math.isclose(__A ,__A ,rel_tol=__A ,abs_tol=__A ) ,msg=__A )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both constants share the obfuscated name `lowercase_`, so the
# URL map overwrites the logger — confirm against the original module.
lowercase_ = logging.get_logger(__name__)

# Map of canonical TrOCR checkpoint names to their hosted config files.
lowercase_ = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __UpperCamelCase ):
    '''Configuration for a TrOCR decoder (mirrors `TrOCRConfig`).

    Bug fix: the original `__init__` declared every parameter with the same
    name `a` (a SyntaxError) and assigned every value to one local variable
    instead of instance attributes. Parameter names and `self.*` assignments
    are restored from the defaults visible in the original signature.
    '''

    # NOTE(review): these three class attributes all share the obfuscated
    # name `__snake_case`, so the attribute map overwrites the first two.
    __snake_case = """trocr"""
    __snake_case = ["""past_key_values"""]
    __snake_case = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.0_2,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Forward the special-token ids (and extra kwargs) to the base config.
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 669 | 0 |
from math import ceil
def lowercase__ ( device_map , num_blocks ):
    """Validate that `device_map` assigns each attention block in
    ``range(num_blocks)`` to exactly one device.

    Bug fix: the original declared both parameters with the same name `A_`
    (a SyntaxError) and bound every local to `__UpperCAmelCase`, leaving
    `blocks`, `device_map_blocks`, etc. undefined at their point of use.

    Raises ValueError when a block is duplicated, missing, or out of range.
    """
    blocks = list(range(0 , num_blocks ) )

    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks ) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks ) )
def lowercase__ ( n_layers , devices ):
    """Split `n_layers` layer indices as evenly as possible across `devices`.

    Bug fix: the original declared both parameters with the same name `A_`
    (a SyntaxError) and bound every local to `__UpperCAmelCase`, leaving
    `layers`, `n_blocks`, and `layers_list` undefined at their point of use.

    Returns a dict mapping each device to its contiguous chunk of layer indices.
    """
    layers = list(range(n_layers ) )
    # ceil so the last device may receive a shorter chunk.
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]

    return dict(zip(devices , layers_list ) )
| 68 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
    '''Processor wrapping a CLIP image processor and an XLM-RoBERTa tokenizer.

    Bug fix: `__init__` and `__call__` declared every parameter with the same
    name `a` (a SyntaxError) while their bodies read the original names
    (`image_processor`, `tokenizer`, `text`, `images`, ...); the parameter
    names are restored to match the bodies, and the tokenizer/image-processor
    results are bound to the names the return paths actually use.
    '''

    # NOTE(review): these three class attributes all share the obfuscated
    # name `__snake_case`, so the tokenizer tuple overwrites the first two.
    __snake_case = ["""image_processor""", """tokenizer"""]
    __snake_case = """CLIPImageProcessor"""
    __snake_case = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        # Fall back to the deprecated argument when no image processor is given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        # Tokenize text and/or preprocess images; at least one must be given.
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            # Merge image features into the text encoding.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def _snake_case ( self , *args , **kwargs ):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def _snake_case ( self , *args , **kwargs ):
        # Forward to the tokenizer's decode.
        # NOTE(review): shadows the previous method (obfuscated name collision).
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def _snake_case ( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build an n-qubit quantum Fourier transform circuit and simulate it.

    Returns the measurement counts from 10000 shots on the QASM simulator.
    Raises TypeError for non-numeric input and ValueError for non-positive,
    non-integer, or >10 qubit counts.

    Fixes: the obfuscated version called `isinstance(n, n)` (always raises
    TypeError) and referenced an undefined parameter name; renamed to match
    the `quantum_fourier_transform(3)` call in the __main__ block.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # Controlled phase between qubit j and qubit `counter`
            # (matches the upstream TheAlgorithms implementation — confirm).
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=1_00_00)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # Demo: run the 3-qubit QFT on the simulator and print the counts.
    # NOTE(review): requires a module-level `quantum_fourier_transform`; the
    # function above is defined under an obfuscated name — confirm.
    print(
        F'''Total count for quantum fourier transform state is: \
    {quantum_fourier_transform(3)}'''
    )
| 69 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast tests for the unconditional LDMPipeline using tiny dummy models.

    Fixes: dummy-model properties and the test method were all shadowed under
    one obfuscated name (so `self.dummy_uncond_unet` etc. were undefined) and
    several calls referenced an undefined `a`.
    """

    @property
    def dummy_uncond_unet(self):
        """Tiny UNet built with a fixed seed."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def dummy_vq_model(self):
        """Tiny VQ-VAE built with a fixed seed."""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def dummy_text_encoder(self):
        """Tiny CLIP text encoder built with a fixed seed."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        """Pipeline output matches a pinned slice and tuple/dict paths agree."""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        # Identical seeds so both invocations produce identical latents.
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        # Looser tolerance on Apple MPS where kernels differ slightly.
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test against the pretrained CompVis/ldm-celebahq-256.

    Fixes: undefined `a` references (device / progress-bar flag) and the
    obfuscated method name that unittest would never discover.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared test fixtures: the BGR test image and its grayscale version.
# Fix: `cvtColor` referenced an undefined global `img`; use the image just read.
# NOTE(review): later tests reference a global `lowercase` that is never
# defined in this module (obfuscated name) — TODO reconcile the fixture names.
lowerCamelCase : Any = imread(r"digital_image_processing/image_data/lena_small.jpg")
lowerCamelCase : List[Any] = cvtColor(lowerCamelCase, COLOR_BGR2GRAY)
def _SCREAMING_SNAKE_CASE( ):
    """Negative conversion yields at least one nonzero pixel."""
    # NOTE(review): `lowercase` is presumably the module-level test image — confirm.
    # Fix: result was bound to a throwaway name while the assert read `negative_img`.
    negative_img = cn.convert_to_negative(lowercase)
    # assert negative_img array for at least one True
    assert negative_img.any()
def _SCREAMING_SNAKE_CASE( ):
    """change_contrast returns a PIL RGB image of the expected size."""
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        # Fix: pass the image opened above (previously an undefined global).
        assert str(cc.change_contrast(img , 1_10 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )
def _SCREAMING_SNAKE_CASE( ):
    """Generated Gaussian kernel has no zero entries."""
    # Fix: result was discarded while the assert read an undefined `resp`.
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def _SCREAMING_SNAKE_CASE( ):
    """Canny edge detection finds at least one edge pixel."""
    # Fix: loaded image and edge map were discarded; asserts read undefined names.
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def _SCREAMING_SNAKE_CASE ( ):
    '''Gaussian filter of the test image should be nonzero everywhere.'''
    # NOTE(review): `lowercase` is never defined at module level (obfuscated
    # global, presumably the grayscale fixture image) — TODO restore fixture name.
    assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _SCREAMING_SNAKE_CASE( ):
    """Laplace convolution produces a nonzero response."""
    # Laplace operator kernel.
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    # NOTE(review): `lowercase` presumably the module gray image — confirm.
    # Fix: the kernel was computed and discarded; the convolution used the
    # (undefined) input for every argument and the assert read undefined `res`.
    res = conv.img_convolve(lowercase , laplace ).astype(uinta )
    assert res.any()
def _SCREAMING_SNAKE_CASE ( ):
    '''Median filter of the test image should have at least one nonzero pixel.'''
    # NOTE(review): `lowercase` is never defined at module level (obfuscated
    # global, presumably the grayscale fixture image) — TODO restore fixture name.
    assert med.median_filter(lowercase , 3 ).any()
def _SCREAMING_SNAKE_CASE( ):
    """Sobel filter returns nonzero gradient magnitude and direction."""
    # NOTE(review): `lowercase` presumably the module gray image — confirm.
    # Fix: the (grad, theta) pair was unpacked into throwaway names.
    grad, theta = sob.sobel_filter(lowercase)
    assert grad.any() and theta.any()
def _SCREAMING_SNAKE_CASE( ):
    """Sepia conversion yields an all-nonzero image."""
    # NOTE(review): `lowercase` presumably the module color image — confirm.
    # Fix: result was discarded while the assert read undefined `sepia`.
    sepia = sp.make_sepia(lowercase , 20 )
    assert sepia.all()
def _SCREAMING_SNAKE_CASE(lowercase: str = "digital_image_processing/image_data/lena_small.jpg" ):
    """Burkes dithering produces a nonempty output image."""
    # Fix: the Burkes instance was bound to a throwaway name while the
    # following lines referenced `burkes`.
    burkes = bs.Burkes(imread(lowercase , 1 ) , 1_20 )
    burkes.process()
    assert burkes.output_img.any()
def _SCREAMING_SNAKE_CASE(lowercase: str = "digital_image_processing/image_data/lena_small.jpg" , ):
    """Nearest-neighbour resize produces a nonempty output."""
    # Fix: the resizer instance was bound to a throwaway name while the
    # following lines referenced `nn`.
    nn = rs.NearestNeighbour(imread(lowercase , 1 ) , 4_00 , 2_00 )
    nn.process()
    assert nn.output.any()
def _SCREAMING_SNAKE_CASE( ):
    """Local binary pattern of the grayscale image is nonzero somewhere.

    Fix: every intermediate (path, image, coordinates, centre value) was bound
    to a throwaway name and then referenced under its intended name, and the
    LBP value computed in the loop was discarded instead of stored.
    """
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
    assert lbp_image.any()
| 70 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Fix: both globals were assigned to the same obfuscated name (losing the
# usage string) and `shuffle` was called on an undefined `choice`.
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
# Seed pool: ~9% live cells.
choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def create_canvas(size):
    """Return a size×size grid of all-False (dead) cells.

    Fix: the grid was bound to a throwaway name while `return` read an
    undefined `canvas`; renamed to match the `create_canvas` call sites.
    """
    canvas = [[False for _ in range(size)] for _ in range(size)]
    return canvas
def seed(canvas):
    """Randomly seed each cell of `canvas` in place with True/False.

    Fix: the random bit was assigned to a throwaway local (a no-op) instead
    of `canvas[i][j]`, and the inner loop enumerated the whole canvas rather
    than the row; renamed to match the `seed(c)` call site.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas):
    """Return the next generation of `canvas` as a list of lists of bool.

    Fix: each cell's next state was computed and discarded, and the function
    returned an undefined `return_canvas`; renamed to match the `run(c)`
    call site in __main__.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # Neighbourhood is the 3x3 slice centred on (r, c).
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt, neighbours):
    """Apply Conway's rules to cell `pt` given its 3x3 neighbourhood.

    Fix: the original declared two parameters with the same name (a
    SyntaxError) and assigned every state transition to a throwaway local
    while returning an undefined `state`; renamed to match the call in `run`.
    """
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
    # CLI: python <script> <size_of_canvas:int>
    # Fix: every intermediate (canvas size, canvas, figure/axes, colormap)
    # was bound to a throwaway name and then referenced under its real name.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as collapsible Markdown tables.

    One table per benchmark; each column is a metric and the value row shows
    `new / old (diff)` where old/diff are present.

    Fix: the original declared two parameters with the same name (a
    SyntaxError) and bound every accumulator (`output_md`, `title`, `lines`,
    `value`, `val_str`) to a throwaway name; renamed to match the
    `format_json_to_md` call in __main__.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    # CLI: python format.py <input_json_file> <output_md_file>
    # Fix: both argv values were bound to the same throwaway name while the
    # call referenced the intended variables.
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 71 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Output spaces an MGP-STR prediction can be decoded in.

    Fix: all three members were shadowed under one obfuscated attribute name
    and the class itself renamed, although the tuple below and the processor's
    `_decode_helper` reference `DecodeType.CHARACTER/BPE/WORDPIECE`.
    """

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( ProcessorMixin ):
    """MGP-STR processor: a ViT image processor plus a character tokenizer,
    with auxiliary GPT-2 (BPE) and BERT (wordpiece) tokenizers for decoding.

    Fixes: `__init__`/`__call__` declared several parameters under one name
    (SyntaxError), decoded tensors/tokenizers were bound to throwaway locals,
    and the decode helpers were all shadowed under one method name although
    `_decode_helper` dispatches to `self.char_decode`/`bpe_decode`/`wp_decode`.
    """

    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        # Decoding uses three tokenizers: char (MGP-STR), BPE (gpt2), wordpiece (bert).
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Preprocess `images` and/or tokenize `text`; with both, the char
        token ids are attached to the image inputs as `labels`."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Fix: previously the ids were computed and discarded.
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, sequences):
        """Decode (char, bpe, wp) logits and keep the highest-confidence string
        per sample. Returns a dict with generated_text, scores and the
        per-space decodings."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Decode logits in one space; return (strings, confidence scores)."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'Format {format} is not supported.')
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # Greedy decode; drop the first position ([GO]-style token).
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of per-step max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Char-space decode; strips the spaces the char tokenizer inserts."""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """BPE-space decode via the GPT-2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Wordpiece-space decode; strips inserted spaces."""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 669 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """True if some permutation of `input_str` (case/space-insensitive) is a
    palindrome, i.e. at most one character has an odd frequency.

    Fix: the body referenced `input_str` while the parameter was obfuscated to
    another name, and both checker functions shared one name (the second
    shadowed this one); renamed to match the benchmark/__main__ call sites.
    """
    return sum(c % 2 for c in Counter(input_str.replace(' ', '').lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Dict-based variant of the palindrome-rearrangement check.

    Fix: every local (normalized string, frequency dict, odd counter) was
    bound to a throwaway name and then referenced under its intended name;
    renamed to match the benchmark call sites.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(' ', '').lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # At most one character may appear an odd number of times.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(lowercase_: str = "") -> None:
    """Print answers and timeit timings for both palindrome checkers.

    Fix: renamed from the obfuscated (shadowed) name to `benchmark`, which is
    what the __main__ block calls. The timeit statements read
    `__main__.check_str`, so this is meaningful only when run as a script.
    """
    print('''\nFor string = ''', lowercase_, ''':''')
    print(
        '''> can_string_be_rearranged_as_palindrome_counter()''', '''\tans =''', can_string_be_rearranged_as_palindrome_counter(lowercase_), '''\ttime =''', timeit(
            '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''', setup='''import __main__ as z''', ), '''seconds''', )
    print(
        '''> can_string_be_rearranged_as_palindrome()''', '''\tans =''', can_string_be_rearranged_as_palindrome(lowercase_), '''\ttime =''', timeit(
            '''z.can_string_be_rearranged_as_palindrome(z.check_str)''', setup='''import __main__ as z''', ), '''seconds''', )
if __name__ == "__main__":
    # Fix: the input string and the result were bound to throwaway names while
    # `benchmark(check_str)` and the final print referenced `check_str`/`status`.
    check_str = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 72 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Fix: both globals were bound to the same obfuscated name, so the `logger`
# and `TOKENIZER_CLASSES` names used throughout this script were undefined.
logger = logging.get_logger(__name__)
# Map slow tokenizer class name -> fast class, e.g. "BertTokenizer" -> BertTokenizerFast.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
# Backward-compat alias for the previous (obfuscated) global name.
lowercase_ = TOKENIZER_CLASSES
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoint(s) to fast `tokenizer.json` files.

    tokenizer_name: slow class name to convert, or None for all convertible ones.
    checkpoint_name: specific checkpoint, or None for every canonical checkpoint.
    dump_path: output directory. force_download: re-download cached checkpoints.

    Fix: the original declared four parameters under one name (SyntaxError)
    and bound every intermediate to a throwaway local; renamed to match the
    `convert_slow_checkpoint_to_fast` call in __main__.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}')
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            # NOTE(review): upstream saves with legacy_format=False — confirm.
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'=> File names {file_names}')
            # Keep only the fast tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to throwaway names while the
    # following lines referenced `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
def _print_dist(dist, v):
    """Pretty-print a v×v distance matrix; 'INF' marks unreachable pairs.

    Fix: the original declared both parameters under one name (SyntaxError);
    renamed to `_print_dist` to match the call inside `floyd_warshall`.
    """
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()
def floyd_warshall(graph, v):
    """All-pairs shortest paths over a v×v adjacency matrix of weights
    (float('inf') = no edge). Prints the matrix and returns (dist, v).

    Fix: the original declared both parameters under one name (SyntaxError)
    and discarded every distance update into a throwaway local; renamed to
    match the `floyd_warshall(graph, v)` call in __main__.
    """
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    # Fix: every value read from input() was bound to a throwaway name while
    # the loops referenced `v`, `e`, `graph`, `src`, `dst` and `weight`.
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    # Distance from a vertex to itself is zero.
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 73 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def _snake_case ( self: List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase : List[str] = PegasusTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _snake_case ( self: List[Any] ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def _snake_case ( self: Tuple , **a: List[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
def _snake_case ( self: List[Any] , a: int ):
return ("This is a test", "This is a test")
def _snake_case ( self: Any ):
__lowerCamelCase : Dict = '</s>'
__lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(a ) , 1103 )
def _snake_case ( self: Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _snake_case ( self: Dict ):
__lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__lowerCamelCase : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
__lowerCamelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
__lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
self.assertListEqual(a , a )
def _snake_case ( self: int ):
__lowerCamelCase : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__lowerCamelCase : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
__lowerCamelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
__lowerCamelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
self.assertListEqual(a , a )
def _snake_case ( self: Dict ):
__lowerCamelCase : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
__lowerCamelCase : int = 'To ensure a smooth flow of bank resolutions.'
__lowerCamelCase : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
__lowerCamelCase : List[str] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
self.assertListEqual(a , a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _snake_case ( self: str ):
__lowerCamelCase : List[str] = ['This is going to be way too long.' * 150, 'short example']
__lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
__lowerCamelCase : Union[str, Any] = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
__lowerCamelCase : List[str] = self._large_tokenizer(
text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(a ) == 2 # input_ids, attention_mask.
    @slow
    def _snake_case ( self: List[str] ):
        # Integration test: full expected encoding (input_ids + attention_mask)
        # for a pinned revision of google/bigbird-pegasus-large-arxiv.
        # fmt: off
        __lowerCamelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # NOTE(review): `expected_encoding=a` passes the undefined name `a`
        # rather than the dict built above — NameError as written.
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''

    # Tokenizer tests for the BigBird-Pegasus flavour of PegasusTokenizer
    # (offset=0 and a single '[MASK]' token instead of <mask_1>/<mask_2>).
    # NOTE(review): this chunk looks machine-mangled — the mixin base
    # `__UpperCamelCase` is not defined in this file, all four class
    # attributes share the single name `__snake_case` (only the last
    # assignment survives), and the methods bind locals to `__lowerCamelCase`
    # but later read other names (`tokenizer`, `rust_tokenizer`, `batch`,
    # ...), which raises NameError as written.
    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True

    def _snake_case ( self: str ):
        # Build a local SentencePiece-backed tokenizer fixture in tmpdir.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : str = PegasusTokenizer(a , offset=0 , mask_token_sent=a , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _snake_case ( self: List[str] ):
        # Slow pretrained reference tokenizer used by the batch/integration tests.
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def _snake_case ( self: Union[str, Any] , **a: Dict ):
        # Reload the fixture tokenizer saved in setUp.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )

    def _snake_case ( self: List[str] , a: Any ):
        # (input, expected output) pair for the common tokenizer test mixin.
        return ("This is a test", "This is a test")

    def _snake_case ( self: Any ):
        # Rust and Python tokenizers must agree on ids for text containing
        # unknown markers, '[MASK]' and explicit padding.
        __lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        __lowerCamelCase : int = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )

    @require_torch
    def _snake_case ( self: Union[str, Any] ):
        # Padding/truncation up to the 4096-token BigBird context length.
        __lowerCamelCase : Union[str, Any] = ['This is going to be way too long.' * 1000, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : str = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : Any = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2  # input_ids, attention_mask.

    def _snake_case ( self: Any ):
        # Regression check: ids must match the original TF implementation.
        __lowerCamelCase : int = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        __lowerCamelCase : Dict = self._large_tokenizer(a ).input_ids
        self.assertListEqual(
            a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 669 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger.
lowercase_ = logging.get_logger(__name__)
# NOTE(review): this rebinding clobbers the logger created on the previous
# line — the original file had a separate `logger` and a pretrained-config
# archive map; the config class below calls `logger.info`, which is now
# undefined at module scope.
lowercase_ = {
    """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class __UpperCamelCase ( PretrainedConfig ):
    """Configuration for a DETA (Detection Transformer with Assignment) model.

    Stores the deformable-attention encoder/decoder hyper-parameters, the
    nested backbone config, the Hungarian-matcher costs and the loss
    coefficients.

    BUG FIX: the previous block was machine-mangled — every ``__init__``
    parameter was named ``_A`` (duplicate argument names are a SyntaxError),
    both class attributes shared one name, the base class ``lowerCAmelCase__``
    was undefined, and every assignment bound a throwaway local instead of
    ``self``.  Names are restored from the default values and the body's own
    attribute usage (e.g. ``attribute_map`` and the properties below).
    """

    model_type = '''deta'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='''relu''',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,  # accepted for API compatibility; not stored below
        auxiliary_loss=False,
        position_embedding_type='''sine''',
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        # The module-level name `lowercase_` is rebound to the archive map
        # later in this file, so fetch a logger locally instead.
        logger = logging.get_logger(__name__)
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain-dict backbone config into its config class.
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self):
        # Mirrors attribute_map: attention heads come from the encoder setting.
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        # Mirrors attribute_map: hidden size is the transformer d_model.
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 74 |
def UpperCamelCase__ ( density: float , bulk_modulus: float ) -> float:
    """Return the speed of sound in a fluid, c = sqrt(K / rho).

    Args:
        density: fluid density (must be > 0).
        bulk_modulus: fluid bulk modulus (must be > 0).

    Raises:
        ValueError: if either argument is not positive.

    BUG FIX: both parameters were named ``SCREAMING_SNAKE_CASE__`` (a
    SyntaxError); restored distinct names matching the body's checks.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 669 | 0 |
"""Lazy import structure for the BigBird-Pegasus model subpackage."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> list of public symbols, consumed by _LazyModule below.
UpperCamelCase__ = {
    '''configuration_bigbird_pegasus''': [
        '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BigBirdPegasusConfig''',
        '''BigBirdPegasusOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: add the modeling symbols to the import structure instead of
    # rebinding the name to a bare list (which clobbered the config entry).
    UpperCamelCase__['''modeling_bigbird_pegasus'''] = [
        '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BigBirdPegasusForCausalLM''',
        '''BigBirdPegasusForConditionalGeneration''',
        '''BigBirdPegasusForQuestionAnswering''',
        '''BigBirdPegasusForSequenceClassification''',
        '''BigBirdPegasusModel''',
        '''BigBirdPegasusPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: install the lazy module in sys.modules; binding it to a plain
    # module-level variable (as before) never activated lazy loading and left
    # `_import_structure` referenced but undefined.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase__, module_spec=__spec__)
| 75 |
def count_divisors(n: int) -> int:
    """Return the number of positive divisors of ``n`` via prime factorisation.

    BUG FIX: the original bound its parameter as ``SCREAMING_SNAKE_CASE__``
    while the body read ``n``, and the counters were bound to throwaway
    locals; names restored from the body's own usage.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        # A prime power p**m contributes (m + 1) divisor choices.
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Whatever remains is a prime appearing exactly once.
        n_divisors *= 2
    return n_divisors


def solution(limit: int = 500) -> int:
    """Project Euler 12: first triangular number with more than ``limit`` divisors.

    ``limit`` defaults to the original hard-coded 500, but is now a parameter
    for wider (and testable) use.
    """
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > limit:
            break
    return t_num


# Backward-compatible alias: the module previously exposed this name (the
# second, shadowing definition was the entry point).
UpperCamelCase__ = solution

if __name__ == "__main__":
    print(solution())
| 669 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
    # Config/inputs factory ("model tester") for the Flax ALBERT tests.
    # NOTE(review): machine-mangled — every __init__ parameter is named
    # `UpperCamelCase_` (duplicate argument names are a SyntaxError) and the
    # attribute assignments bind throwaway locals (`__lowercase`) instead of
    # `self.<attr>`; the upstream FlaxAlbertModelTester names must be
    # restored (parent, batch_size, seq_length, ...) before this can run.
    def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=4 , ) -> List[str]:
        __lowercase : Tuple = parent
        __lowercase : List[Any] = batch_size
        __lowercase : str = seq_length
        __lowercase : List[Any] = is_training
        __lowercase : Optional[int] = use_attention_mask
        __lowercase : Dict = use_token_type_ids
        __lowercase : List[str] = use_labels
        __lowercase : List[str] = vocab_size
        __lowercase : Any = hidden_size
        __lowercase : Tuple = num_hidden_layers
        __lowercase : int = num_attention_heads
        __lowercase : Tuple = intermediate_size
        __lowercase : Dict = hidden_act
        __lowercase : Dict = hidden_dropout_prob
        __lowercase : Optional[Any] = attention_probs_dropout_prob
        __lowercase : str = max_position_embeddings
        __lowercase : Union[str, Any] = type_vocab_size
        __lowercase : int = type_sequence_label_size
        __lowercase : Union[str, Any] = initializer_range
        __lowercase : Any = num_choices

    def _lowerCamelCase ( self ) -> List[str]:
        # Build random input_ids / masks / token_type_ids plus an AlbertConfig.
        __lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowercase : Optional[int] = None
        if self.use_attention_mask:
            __lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase : Optional[Any] = None
        if self.use_token_type_ids:
            __lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __lowercase : List[Any] = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _lowerCamelCase ( self ) -> int:
        # Repackage the prepared config/inputs as the dict the common tests use.
        __lowercase : str = self.prepare_config_and_inputs()
        __lowercase ,__lowercase ,__lowercase ,__lowercase : Union[str, Any] = config_and_inputs
        __lowercase : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( snake_case , unittest.TestCase ):
    # Model-level tests for the Flax ALBERT family.
    # NOTE(review): the mixin base `snake_case` is not defined in this file
    # (upstream this is FlaxModelTesterMixin, imported above);
    # FlaxAlbertForQuestionAnswering appears TWICE in the tuple below
    # (duplicate entry, it is merely tested twice); and setUp binds the
    # tester to a throwaway local while referencing the undefined name
    # `FlaxAlbertModelTester`.
    UpperCamelCase =(
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _lowerCamelCase ( self ) -> int:
        # Create the shared model tester fixture.
        __lowercase : Union[str, Any] = FlaxAlbertModelTester(self )

    @slow
    def _lowerCamelCase ( self ) -> str:
        # Smoke-test: every class loads from the hub and runs a 1x1 forward.
        for model_class_name in self.all_model_classes:
            __lowercase : Optional[int] = model_class_name.from_pretrained('''albert-base-v2''' )
            __lowercase : List[str] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    # Integration test: pinned hidden-state values for albert-base-v2.
    # NOTE(review): locals are bound to `__lowercase` but the code then reads
    # `model`/`output` and passes the undefined name `UpperCamelCase_` —
    # NameError as written.
    @slow
    def _lowerCamelCase ( self ) -> Optional[int]:
        __lowercase : str = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        __lowercase : Tuple = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        __lowercase : Any = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        __lowercase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
        __lowercase : Optional[int] = (1, 11, 7_68)
        self.assertEqual(output.shape , UpperCamelCase_ )
        # Spot-check a 3x3 slice of the last hidden state against TF values.
        __lowercase : Dict = np.array(
            [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1E-4 ) )
| 76 |
import numpy as np
class A_:
    """A grid cell for the A* search below.

    Attributes (read by ``astar``): position, parent, g (path cost),
    h (heuristic), f (g + h).

    BUG FIX: the previous ``__init__`` bound every value to a throwaway
    local, so instances had no attributes at all; the restored attribute
    names follow how the search code reads them (``n.g``, ``n.h``, ``n.f``,
    ``current.parent``, ``cell.position``).
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, other):
        # Cells are equal when they occupy the same grid coordinate.
        return self.position == other.position

    def _snake_case(self):
        # Debug helper: print this cell's coordinates.
        print(self.position )
class A_:
    """A rectangular grid world for the A* demo.

    BUG FIX: the previous constructor bound its values to throwaway locals
    (instances had no ``w``/limit attributes), both methods shared the name
    ``_snake_case``, and the neighbour loop referenced the undefined name
    ``Cell``.  Attribute/method names are restored from their use sites
    (``world.get_neigbours``, ``self.world_x_limit``, ``world.w``); new
    cells are built via ``type(cell)()`` so no global class name is needed.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        # Debug helper: print the occupancy grid.
        print(self.w )

    def get_neigbours(self, cell):
        """Return the in-bounds 8-connected neighbours of ``cell``."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # Construct a cell of the same class as the input cell.
                c = type(cell)()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar(world, start, goal):
    """A* search from ``start`` to ``goal`` over ``world``; returns the path
    as a list of (x, y) positions from start to goal.

    BUG FIX: the original had three parameters all named
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError), unpacked ``n.position`` and
    ``goal.position`` into duplicate names (so the heuristic was computed
    from garbage), and its closed/open membership checks used ``continue``
    inside the *inner* scan loop, which filtered nothing.  Restored so that
    already-expanded cells are skipped and worse duplicate frontier entries
    are not re-added.
    """
    _open = [start]
    _closed = []
    current = start
    while _open:
        # Expand the frontier node with the smallest f = g + h.
        min_f = np.argmin([n.f for n in _open] )
        current = _open.pop(int(min_f ) )
        _closed.append(current )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            # Skip nodes we have already expanded.
            if any(c == n for c in _closed ):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Squared-distance heuristic to the goal.
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # Keep only the better of duplicate frontier entries.
            if any(c == n and c.f < n.f for c in _open ):
                continue
            _open.append(n )
    # Walk parent links back from the goal to reconstruct the path.
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]


# Backward-compatible alias for the previous obfuscated public name; the
# __main__ block below calls `astar` directly.
UpperCamelCase__ = astar
if __name__ == "__main__":
    # Demo: run A* on a 5x5 grid from (0, 0) to (4, 4) and print the result.
    # NOTE(review): machine-mangled — every result below is bound to the one
    # name `lowercase_`, while the code then reads `start`, `goal`, `world`
    # and `s`; presumably the original was `world = Gridworld()`,
    # `start.position = (0, 0)`, `goal.position = (4, 4)`,
    # `s = astar(...)` and `world.w[i] = 1` — TODO confirm and restore.
    lowercase_ = Gridworld()
    # Start position and goal
    lowercase_ = Cell()
    lowercase_ = (0, 0)
    lowercase_ = Cell()
    lowercase_ = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    lowercase_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowercase_ = 1
    print(world.w)
| 669 | 0 |
"""Isolate the fractional (decimal) part of a number."""


def _UpperCamelCase(number: float, digit_amount: int) -> float:
    """Return the fractional part of ``number``.

    When ``digit_amount`` > 0 the result is rounded to that many decimal
    digits; otherwise the raw float difference is returned.

    BUG FIX: both parameters were named ``UpperCamelCase`` (a SyntaxError);
    names restored from the call sites below.
    """
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )


# BUG FIX: the __main__ block calls `decimal_isolate`, which was never
# defined; bind it to the implementation (also gives the module a
# conventionally named public entry point).
decimal_isolate = _UpperCamelCase

if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 77 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for ``year`` using Gauss's method.

    BUG FIX: every intermediate was bound to the one throwaway name
    ``__lowerCamelCase`` while later expressions read the real variable
    names (``metonic_cycle``, ``days_to_add``, ...), and the __main__ block
    called the undefined name ``gauss_easter`` — names restored from the
    body's own usage; the old obfuscated name is kept as an alias below.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


# Backward-compatible alias for the previous obfuscated public name.
UpperCamelCase__ = gauss_easter

if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Primality check by trial division over odd numbers up to sqrt(number).

    BUG FIX: the original read ``isinstance(snake_case_, snake_case_)``
    (always a TypeError) and returned over the undefined name
    ``odd_numbers``; names restored from the body's own usage.  The input
    guard stays an ``assert`` to preserve the original contract
    (AssertionError on bad input).
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def next_prime(value, factor=1, **kwargs):
    """Return the next prime strictly beyond ``value * factor`` (or the next
    one *below* when called with ``desc=True``).

    If ``value * factor`` is itself prime, the search restarts from the next
    integer so the result is always a *different* prime.
    """
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value


# Backward-compatible alias: the module previously exposed this name (the
# second, shadowing definition was the last binding).
lowerCAmelCase_ = next_prime
| 78 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving (VP) SDE scheduler, score_sde_pytorch style.

    BUG FIX: the previous block was machine-mangled — the two bases were the
    undefined name ``__UpperCamelCase`` (restored from this file's imports:
    SchedulerMixin, ConfigMixin), ``__init__`` had four parameters all named
    ``a`` (a SyntaxError), ``set_timesteps`` bound its result to a throwaway
    local so ``self.timesteps`` was never set, and the step method read a
    dozen names (``std``, ``score``, ``beta_t``, ``drift``, ...) that were
    never bound.  Names restored from the body's own reads and the error
    message referencing 'set_timesteps'.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # Populated lazily; step_pred() refuses to run until set_timesteps().
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the continuous time grid from 1 down to sampling_eps."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred(self, score, x, t, generator=None):
        """One reverse-SDE predictor step; returns ``(x, x_mean)``."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 669 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's CLI: core count, target script and its args.

    BUG FIX: the parser was bound to a throwaway local while the code read
    ``parser``, and ``type``/``nargs`` were the undefined name
    ``__lowerCamelCase`` — restored to ``int`` and ``REMAINDER``.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()


def main():
    """Resolve the training script, patch sys.argv, and spawn it on the TPU cores."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )


# Backward-compatible alias for the previous (shadowed) obfuscated name.
_lowerCamelCase = main

if __name__ == "__main__":
    main()
| 79 |
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers (of the form 2^i * 3^j * 5^k).

    Raises:
        ValueError: if ``n_element`` is less than 1.

    BUG FIX: the original bound its working list/counters to throwaway
    locals while reading ``hamming_list``; names restored from the body's
    own usage, and the __main__ block now calls this function (it referenced
    the undefined names ``hamming`` and ``hamming_numbers``).
    """
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    # Classic three-pointer merge over multiples of 2, 3 and 5.
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list


# Backward-compatible alias for the previous obfuscated public name.
UpperCamelCase__ = hamming

if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __UpperCamelCase ( Protocol ):
    """Structural type for audio filters: anything exposing
    ``process(sample) -> float``, the hook the display helpers below call
    (``filter_type.process(item)``).

    BUG FIX: the base was the undefined name ``_lowerCAmelCase`` — restored
    to the ``Protocol`` imported at the top of this section — and the
    method is named ``process`` to match its call sites.
    """

    def process(self, sample: float) -> float:
        # Default no-op response; concrete filters override this.
        return 0.0
def get_bounds(fft_results, samplerate):
    """Return (lowest, highest) plot bounds over the positive-frequency bins,
    extended to at least [-20, 20] dB."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def show_frequency_response(filter_type, samplerate):
    """Plot the magnitude response (dB over a log-frequency axis) of ``filter_type``.

    BUG FIX: the three functions in this section were all named
    ``snake_case`` with duplicate parameter names (a SyntaxError) and read
    undefined locals (``inputs``, ``outputs``, ``item``, ``get_bounds``);
    names restored so they can call each other.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )
    plt.plot(fft_db )
    plt.show()


def show_phase_response(filter_type, samplerate):
    """Plot the phase response (radians over a log-frequency axis) of ``filter_type``."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("""Phase shift (Radians)""" )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()


# Backward-compatible alias: the previous (shadowed) public name ended up
# bound to the last definition.
snake_case = show_phase_response
| 80 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
    '''simple docstring'''

    # Unit tests for the greedy knapsack helper `kp.calc_profit`.
    # NOTE(review): machine-mangled — the error-path tests pass the undefined
    # name `a` to assertRaisesRegex (NameError as written); upstream these
    # passed the expected exception type (e.g. ValueError) plus the
    # calc_profit inputs.  Locals bound to `__lowerCamelCase` are likewise
    # read under other names.
    def _snake_case ( self: List[Any] ):
        # Happy path: capacity 100 over these profits/weights yields 210.
        __lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
        __lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
        __lowerCamelCase : Tuple = 100
        self.assertEqual(kp.calc_profit(a , a , a ) , 210 )

    def _snake_case ( self: str ):
        # Zero/negative capacity must be rejected.
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def _snake_case ( self: List[str] ):
        # Negative weights must be rejected.
        self.assertRaisesRegex(a , 'Weight can not be negative.' )

    def _snake_case ( self: Dict ):
        # Negative profits must be rejected.
        self.assertRaisesRegex(a , 'Profit can not be negative.' )

    def _snake_case ( self: List[str] ):
        # Missing/invalid capacity must be rejected.
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def _snake_case ( self: Any ):
        # Profit and weight lists must be the same length.
        self.assertRaisesRegex(
            a , 'The length of profit and weight must be same.' )

if __name__ == "__main__":
    unittest.main()
| 669 | 0 |
def stooge_sort(arr):
    """Sort ``arr`` in place with stooge sort and return it.

    BUG FIX: both functions in this section shared the name
    ``lowerCAmelCase_`` (the second shadowed the first) while calling the
    undefined names ``stooge``/``stooge_sort``, and the recursive calls'
    arguments were garbled — names restored; the old public name is kept as
    an alias below.
    """
    stooge(arr , 0 , len(arr ) - 1 )
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort ``arr[i..h]`` (inclusive) in place."""
    if i >= h:
        return
    # If first element is larger than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the range
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr , i , h - t )
        # Recursively sort last 2/3 elements
        stooge(arr , i + t , h )
        # Recursively sort first 2/3 elements again
        stooge(arr , i , h - t )


# Backward-compatible alias: the module previously exposed this name (the
# second, shadowing definition was the last binding).
lowerCAmelCase_ = stooge

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 81 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """Builds a miniature LayoutLMv3 config plus random TF inputs for the unit tests.

    NOTE(review): the automated renaming pass left every ``__init__`` parameter
    named ``a`` (duplicate argument names are a SyntaxError in Python) and bound
    every local to ``__lowerCamelCase``, so the right-hand-side names used below
    (``parent``, ``batch_size``, ...) are currently unresolved. The original
    keyword parameters must be restored before this class can be imported.
    """

    def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
        # Cache every hyper-parameter of the scaled-down test model on the tester.
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Optional[int] = num_channels
        __lowerCamelCase : str = image_size
        __lowerCamelCase : int = patch_size
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Dict = use_input_mask
        __lowerCamelCase : Any = use_token_type_ids
        __lowerCamelCase : List[str] = use_labels
        __lowerCamelCase : str = vocab_size
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : List[Any] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : Any = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : int = type_sequence_label_size
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = coordinate_size
        __lowerCamelCase : int = shape_size
        __lowerCamelCase : Union[str, Any] = num_labels
        __lowerCamelCase : int = num_choices
        __lowerCamelCase : int = scope
        __lowerCamelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCamelCase : Any = text_seq_length
        __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length

    def _snake_case ( self: List[str] ):
        """Build a config plus random (input_ids, bbox, pixel_values, masks, labels)."""
        __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCamelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap so that the second y-coordinate is >= the first.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __lowerCamelCase : List[str] = bbox[i, j, 3]
                    __lowerCamelCase : str = bbox[i, j, 1]
                    __lowerCamelCase : Dict = tmp_coordinate
                # Swap so that the second x-coordinate is >= the first.
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __lowerCamelCase : Tuple = bbox[i, j, 2]
                    __lowerCamelCase : Any = bbox[i, j, 0]
                    __lowerCamelCase : List[str] = tmp_coordinate
        # NOTE(review): ``a`` is undefined here — presumably the repaired bbox array.
        __lowerCamelCase : str = tf.constant(a )
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : Any = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCamelCase : Tuple = None
        if self.use_token_type_ids:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCamelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
        """Run the base model on text+image, text-only and image-only inputs and check shapes."""
        __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
        # text + image
        __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __lowerCamelCase : List[Any] = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
        """Check the sequence-classification head returns (batch, num_labels) logits."""
        __lowerCamelCase : List[str] = self.num_labels
        __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
        """Check the token-classification head returns (batch, text_seq_length, num_labels) logits."""
        __lowerCamelCase : Union[str, Any] = self.num_labels
        __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
        __lowerCamelCase : Optional[Any] = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
        """Check the QA head returns start/end logits of shape (batch, seq_length)."""
        __lowerCamelCase : List[Any] = 2
        __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
        __lowerCamelCase : Any = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _snake_case ( self: List[Any] ):
        """Return (config, inputs_dict) for the common model tests."""
        __lowerCamelCase : str = self.prepare_config_and_inputs()
        ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Common TF model-tester suite for the LayoutLMv3 family (base model and the
    QA / sequence-classification / token-classification heads).

    NOTE(review): obfuscation damage throughout — several signatures repeat the
    parameter name ``a`` (a SyntaxError), every class attribute is named
    ``__snake_case`` (each rebinding shadows the previous one), and most call
    arguments were collapsed to the undefined name ``a``. The original mixin
    base ``__UpperCamelCase`` is also unresolved here.
    """

    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False

    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        # Pipeline-test gate: always run (no pipeline test is skipped).
        return True

    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        """Adapt a common inputs dict for a specific model class; optionally add labels."""
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        # Multiple-choice models need every tensor tiled across the choices dim.
        if model_class in get_values(a ):
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            # Fabricate zero/one labels of the right shape per head type.
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def _snake_case ( self: Tuple ):
        # Standard setUp: build the model tester and the config tester.
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )

    def _snake_case ( self: Union[str, Any] ):
        # Exercise the shared config round-trip tests.
        self.config_tester.run_common_tests()

    def _snake_case ( self: Union[str, Any] ):
        """Check hf_compute_loss with kwargs, masked labels, dict inputs and tuple inputs."""
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the ignore index: the loss must still be finite.
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                        __lowerCamelCase : Tuple = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def _snake_case ( self: List[str] ):
        # Base-model forward-pass shape checks.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )

    def _snake_case ( self: int ):
        # Same model checks for each position-embedding flavour.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )

    def _snake_case ( self: Dict ):
        # Sequence-classification head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )

    def _snake_case ( self: str ):
        # Token-classification head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )

    def _snake_case ( self: str ):
        # Question-answering head check.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )

    @slow
    def _snake_case ( self: int ):
        # Smoke-test loading the first published checkpoint from the hub.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
    """Open and return the COCO cats fixture image used by the integration test.

    The obfuscated original bound the opened image to ``__lowerCamelCase`` but
    returned the undefined name ``image``; the local is now named so the
    return resolves.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class A_ ( unittest.TestCase ):
    """Integration test running the real ``microsoft/layoutlmv3-base`` checkpoint.

    Reconstructed from the obfuscated original, which referenced several
    undefined names (``a`` as every call argument) and read
    ``self.default_image_processor`` although the cached property had been
    renamed to ``_snake_case`` (and was shadowed by the test method of the
    same name). The property is restored under the name the test reads.
    """

    @cached_property
    def default_image_processor( self: Optional[int] ):
        # OCR is disabled because the test supplies its own token ids / boxes.
        # NOTE(review): apply_ocr=False matches the upstream transformers test;
        # the obfuscated source had the undefined name ``a`` here.
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def _snake_case ( self: Optional[Any] ):
        """Forward a fixture image through the base model and pin the output logits."""
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='tf' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 669 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a__ ( ):
    """Entry point for the ``diffusers-cli`` command-line tool.

    The obfuscated original assigned every local to ``UpperCAmelCase_`` while
    reading ``lowerCAmelCase__``, ``args`` and ``service`` — all undefined.
    Locals are renamed so each read resolves to the value just produced.
    """
    parser = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        # No subcommand given: show usage and exit non-zero.
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    # ``a__`` is this module's (obfuscated) name for the CLI ``main`` function;
    # the original called the undefined name ``main()``.
    a__()
| 82 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer test-suite for CLIP's slow (BPE) and fast tokenizers.

    NOTE(review): obfuscation damage — every class attribute is named
    ``__snake_case`` (later bindings shadow earlier ones), the mixin base
    ``__UpperCamelCase`` is unresolved, and inside several methods the name
    ``a`` is read without being a parameter (it stands for whatever local was
    just built, e.g. the vocab list in ``setUp``).
    """

    __snake_case = CLIPTokenizer
    __snake_case = CLIPTokenizerFast
    __snake_case = True
    __snake_case = {}
    __snake_case = False

    def _snake_case ( self: Union[str, Any] ):
        """Write a toy vocab + merges file pair into the temp dir for both tokenizers."""
        super().setUp()
        # fmt: off
        __lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        __lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
        __lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        __lowerCamelCase : Tuple = {'unk_token': '<unk>'}
        __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(a ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(a ) )

    def _snake_case ( self: Tuple , **a: Union[str, Any] ):
        # Build a slow tokenizer from the files written in setUp.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )

    def _snake_case ( self: Union[str, Any] , **a: List[str] ):
        # Build a fast tokenizer from the files written in setUp.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )

    def _snake_case ( self: Optional[int] , a: List[Any] ):
        # Input/expected-output pair for the common round-trip tests.
        __lowerCamelCase : Tuple = 'lower newer'
        __lowerCamelCase : Tuple = 'lower newer'
        return input_text, output_text

    def _snake_case ( self: List[str] ):
        """Check BPE tokenization and token-to-id conversion against the toy vocab."""
        __lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowerCamelCase : Optional[Any] = 'lower newer'
        __lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        __lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        __lowerCamelCase : int = tokens + [tokenizer.unk_token]
        __lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )

    @require_ftfy
    def _snake_case ( self: Union[str, Any] ):
        """Slow (ftfy) and fast tokenizers must agree on tricky unicode inputs."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                __lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
                __lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                __lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
                __lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
                __lowerCamelCase : Any = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of space type
                __lowerCamelCase : List[Any] = [
                    '\u0009', # (horizontal tab, '\t')
                    '\u000B', # (vertical tab)
                    '\u000C', # (form feed)
                    '\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark):w
                    '\u200F', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    __lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
                    __lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of line break type
                __lowerCamelCase : str = [
                    '\u000A', # (line feed, '\n')
                    '\r\n', # (carriage return and line feed, '\r\n')
                    '\u000D', # (carriage return, '\r')
                    '\r', # (carriage return, '\r')
                    '\u000D', # (carriage return, '\r')
                    '\u2028', # (line separator)
                    '\u2029', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    __lowerCamelCase : Dict = tokenizer_s.tokenize(a )
                    __lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )

    def _snake_case ( self: List[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                __lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
                __lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                __lowerCamelCase : List[Any] = F' {text}'
                __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )

    def _snake_case ( self: str ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(a ) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.' ) )

    @require_ftfy
    def _snake_case ( self: Tuple ):
        # Re-run the shared python/rust equivalence test under ftfy.
        super().test_tokenization_python_rust_equals()

    def _snake_case ( self: Tuple ):
        # CLIP always lower cases letters
        pass
| 669 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 83 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab, where raw terminal
# key handling is unavailable and the menu falls back to typed indices.
# NOTE(review): the menu code below reads ``in_colab`` — this flag appears to
# have been renamed to ``lowercase_`` by the obfuscation pass; confirm.
lowercase_ = False
try:
    lowercase_ = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class A_ :
    """Interactive terminal menu: renders a list of choices and lets the user
    pick one with arrow keys, number keys, or (on Colab) a typed index.

    NOTE(review): obfuscation damage — ``__init__`` and ``write_choice`` repeat
    the parameter name ``a`` (a SyntaxError), ``__init__`` uses a mutable
    default (``a: list = []``), and the attributes read below (``self.choices``,
    ``self.prompt``, ``self.arrow_char``, ``self.position``,
    ``self.current_selection``) do not match the collapsed local bindings.
    """

    def __init__( self: int , a: str = None , a: list = [] ):
        # position: index of the currently highlighted choice.
        __lowerCamelCase : Dict = 0
        __lowerCamelCase : Dict = choices
        __lowerCamelCase : Tuple = prompt
        # Windows consoles may not render the arrow glyph; use '*' there.
        if sys.platform == "win32":
            __lowerCamelCase : Union[str, Any] = '*'
        else:
            __lowerCamelCase : Any = '➔ '

    def _snake_case ( self: Any , a: Tuple , a: str = "" ):
        """Write one choice label, green-highlighted where color is supported."""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , a )
        else:
            forceWrite(self.choices[index] , a )

    def _snake_case ( self: Tuple , a: int ):
        """Print a row: arrow prefix for the selected index, plain otherwise."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(a )
        else:
            forceWrite(F' {self.choices[index]}' )
        reset_cursor()

    def _snake_case ( self: Optional[int] , a: Direction , a: int = 1 ):
        """Move the highlight up/down by ``num_spaces``, redrawing both rows."""
        __lowerCamelCase : str = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                # Already on the last row: ignore.
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                # Already on the first row: ignore.
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(a )
        move_cursor(a , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP['up'] )
    def _snake_case ( self: Tuple ):
        # Arrow-up handler.
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP['down'] )
    def _snake_case ( self: Optional[int] ):
        # Arrow-down handler.
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP['newline'] )
    def _snake_case ( self: str ):
        # Enter: move the cursor past the menu and return the selection.
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        return self.position

    @input.mark(KEYMAP['interrupt'] )
    def _snake_case ( self: Union[str, Any] ):
        # Ctrl-C: restore the cursor position, then propagate the interrupt.
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(a )] for number in range(10 )] )
    def _snake_case ( self: str ):
        """Digit key handler: jump directly to the typed choice index."""
        __lowerCamelCase : List[Any] = int(chr(self.current_selection ) )
        __lowerCamelCase : Any = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , a )
            else:
                return
        else:
            return

    def _snake_case ( self: str , a: int = 0 ):
        """Render the menu, run the key loop, and return the chosen index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '\n' )
        if in_colab:
            forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
        else:
            forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        __lowerCamelCase : Dict = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(a )
            forceWrite('\n' )
        move_cursor(len(self.choices ) - self.position , 'UP' )
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab: no raw key events — fall back to typed input.
                    try:
                        __lowerCamelCase : Any = int(builtins.input() )
                    except ValueError:
                        __lowerCamelCase : str = default_choice
                else:
                    __lowerCamelCase : Optional[int] = self.handle_input()
                if choice is not None:
                    # Erase the rendered menu before returning.
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , 'UP' )
                        clear_line()
                    self.write_choice(a , '\n' )
                    return choice
| 669 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files' , [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ] , )
def UpperCAmelCase_ ( files , tmp_path_factory ):
    """DatasetInfosDict.from_directory reads dataset_size from README.md and/or dataset_infos.json.

    Reconstructed: the obfuscated original declared two parameters both named
    ``__SCREAMING_SNAKE_CASE`` (a SyntaxError) and bound every local to
    ``lowercase`` while reading ``files``, ``tmp_path_factory``,
    ``dataset_infos_dir`` and ``dataset_infos``. Parameter names ``files`` and
    ``tmp_path_factory`` are required for pytest fixture injection.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
            f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info' , [
        DatasetInfo(),
        DatasetInfo(
            description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
    ] , )
def UpperCAmelCase_ ( dataset_info , tmp_path ):
    """Round-trip a DatasetInfo through write_to_directory / from_directory.

    Reconstructed from the obfuscated original (duplicate parameter names and
    locals collapsed to ``lowercase``); ``dataset_info`` and ``tmp_path`` are
    the pytest-injected names the body reads.
    """
    tmp_path_str = str(tmp_path )
    dataset_info.write_to_directory(tmp_path_str )
    reloaded = DatasetInfo.from_directory(tmp_path_str )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path_str , 'dataset_info.json' ) )
def UpperCAmelCase_ ( ):
    """``DatasetInfo._to_yaml_dict`` keeps exactly the YAML-safe fields and
    survives a yaml dump/load round-trip.

    Reconstructed: the original bound the info object and its yaml dict to the
    single name ``lowercase`` while reading ``dataset_info`` and
    ``dataset_info_yaml_dict``.
    """
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # Every exported value must be representable in plain YAML.
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_ ( ):
    """An all-default DatasetInfo serializes to an empty YAML dict.

    Reconstructed: the original assigned both locals to ``lowercase`` while
    reading ``dataset_info`` and ``dataset_info_yaml_dict``.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict' , [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42 ),
                'v2': DatasetInfo(dataset_size=1337 ),
            } ),
    ] , )
def UpperCAmelCase_ ( dataset_infos_dict , tmp_path ):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory.

    Reconstructed from the obfuscated original (duplicate parameter names,
    assignments lost to ``lowercase``); mirrors the upstream ``datasets`` test:
    normalize each entry to what survives the YAML representation, then compare
    against the reloaded dict.
    """
    tmp_path_str = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path_str )
    reloaded = DatasetInfosDict.from_directory(tmp_path_str )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path_str , 'README.md' ) )
| 84 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
    # NOTE(review): the base name `__UpperCamelCase` is not defined in this module;
    # the otherwise-unused SchedulerCommonTest import above is presumably the
    # intended base — confirm before relying on this suite.
    # NOTE(review): the two class attributes share the name `__snake_case` and every
    # method shares the name `_snake_case`, so each later definition shadows the
    # previous one; many statements also assign to `__lowerCamelCase` while the
    # following lines read the intended names (config, scheduler, sample, ...).
    # This block cannot run as written.
    '''simple docstring'''
    __snake_case = (CMStochasticIterativeScheduler,)
    __snake_case = 10
    def _snake_case ( self: Any , **a: Dict ):
        # Default scheduler config; kwargs are meant to override individual entries.
        __lowerCamelCase : Optional[Any] = {
            'num_train_timesteps': 201,
            'sigma_min': 0.0_0_2,
            'sigma_max': 8_0.0,
        }
        config.update(**a )
        return config
    def _snake_case ( self: List[Any] ):
        # Step-shape check: two consecutive scheduler.step() calls keep the sample shape.
        __lowerCamelCase : Any = 10
        __lowerCamelCase : Any = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**a )
        scheduler.set_timesteps(a )
        __lowerCamelCase : Any = scheduler.timesteps[0]
        __lowerCamelCase : List[str] = scheduler.timesteps[1]
        __lowerCamelCase : Union[str, Any] = self.dummy_sample
        __lowerCamelCase : int = 0.1 * sample
        __lowerCamelCase : Optional[Any] = scheduler.step(a , a , a ).prev_sample
        __lowerCamelCase : List[str] = scheduler.step(a , a , a ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )
    def _snake_case ( self: Optional[Any] ):
        # Exercise several num_train_timesteps configurations.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=a )
    def _snake_case ( self: List[str] ):
        # Exercise both clip_denoised settings.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=a )
    def _snake_case ( self: Tuple ):
        # One-step full loop (num_inference_steps = 1) with fixed seed; pins sum/mean.
        __lowerCamelCase : Tuple = self.scheduler_classes[0]
        __lowerCamelCase : Tuple = self.get_scheduler_config()
        __lowerCamelCase : Tuple = scheduler_class(**a )
        __lowerCamelCase : int = 1
        scheduler.set_timesteps(a )
        __lowerCamelCase : Optional[int] = scheduler.timesteps
        __lowerCamelCase : List[str] = torch.manual_seed(0 )
        __lowerCamelCase : Union[str, Any] = self.dummy_model()
        __lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(a ):
            # 1. scale model input
            __lowerCamelCase : List[str] = scheduler.scale_model_input(a , a )
            # 2. predict noise residual
            __lowerCamelCase : Optional[int] = model(a , a )
            # 3. predict previous sample x_t-1
            __lowerCamelCase : str = scheduler.step(a , a , a , generator=a ).prev_sample
            __lowerCamelCase : str = pred_prev_sample
        __lowerCamelCase : List[str] = torch.sum(torch.abs(a ) )
        __lowerCamelCase : str = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3
    def _snake_case ( self: Optional[Any] ):
        # Multistep full loop with custom timesteps [106, 0]; pins sum/mean.
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        __lowerCamelCase : int = scheduler_class(**a )
        __lowerCamelCase : List[Any] = [106, 0]
        scheduler.set_timesteps(timesteps=a )
        __lowerCamelCase : Dict = scheduler.timesteps
        __lowerCamelCase : int = torch.manual_seed(0 )
        __lowerCamelCase : Any = self.dummy_model()
        __lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            __lowerCamelCase : Tuple = scheduler.scale_model_input(a , a )
            # 2. predict noise residual
            __lowerCamelCase : Tuple = model(a , a )
            # 3. predict previous sample x_t-1
            __lowerCamelCase : Any = scheduler.step(a , a , a , generator=a ).prev_sample
            __lowerCamelCase : Any = pred_prev_sample
        __lowerCamelCase : Dict = torch.sum(torch.abs(a ) )
        __lowerCamelCase : Optional[Any] = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
        assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3
    def _snake_case ( self: Tuple ):
        # Non-descending custom timesteps must be rejected.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : int = self.get_scheduler_config()
        __lowerCamelCase : List[Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[Any] = [39, 30, 12, 15, 0]
        with self.assertRaises(a , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=a )
    def _snake_case ( self: int ):
        # Passing both num_inference_steps and timesteps must be rejected.
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[int] = [39, 30, 12, 1, 0]
        __lowerCamelCase : List[Any] = len(a )
        with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
    def _snake_case ( self: Optional[Any] ):
        # Custom timesteps must start below num_train_timesteps.
        # NOTE(review): the msg string below is missing an f-prefix and has a stray
        # closing brace — the placeholder is never interpolated.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : Dict = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )
        __lowerCamelCase : Optional[int] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=a )
| 669 | 0 |
import random
class snake_case:
    """One-time-pad-style character cipher.

    Each character gets its own random key ``k`` in [1, 300] and is encoded as
    ``c = (ord(ch) + k) * k``; decryption inverts this exactly with
    ``ord(ch) = (c - k**2) // k``.

    The original body bound every intermediate to a single throwaway name and
    gave both static methods the same (name-mangled) identifier, so the second
    definition shadowed the first; restored to distinct ``encrypt``/``decrypt``.
    """

    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text``; return (cipher, key) lists of equal length."""
        plain = [ord(ch) for ch in text]
        cipher = []
        key = []
        for code in plain:
            k = random.randint(1, 300)
            cipher.append((code + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert :meth:`encrypt` for position-aligned cipher/key lists."""
        plain = []
        for c, k in zip(cipher, key):
            # integer division is exact because c == (p + k) * k == p*k + k**2
            plain.append(chr((c - k**2) // k))
        return "".join(plain)
if __name__ == "__main__":
    # Demo: encrypt a sample string, show the cipher/key pair, and decrypt it back.
    # (Originally called an undefined name `Onepad` and used an annotated tuple
    # unpacking, which is invalid syntax.)
    cipher, key = snake_case().encrypt("Hello")
    print(cipher, key)
    print(snake_case().decrypt(cipher, key))
| 85 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: was `bsa`, a typo that raises ImportError

if __name__ == "__main__":
    # Ask for a page URL, find its og:image meta tag, and save the image to disk.
    # (Originally every assignment was bound to one throwaway name while later
    # lines read `url`, `image_url`, `image_data`, `file_name` — all NameErrors.)
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
import fire

# fixed: `AutoModelForSeqaSeqLM` is not a transformers symbol; the real class is
# AutoModelForSeq2SeqLM.
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized seq2seq model (plus tokenizer) for `config_name`.

    The original def declared the same parameter name three times (a SyntaxError)
    and was named differently from the symbol passed to ``fire.Fire`` below; the
    function is now named to match its CLI entry point.

    Args:
        config_name: model id or path understood by ``AutoConfig.from_pretrained``.
        save_dir: directory to write the model and tokenizer to.
        **config_kwargs: overrides forwarded to ``AutoConfig.from_pretrained``.

    Returns:
        The randomly initialized model.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Collect every file path once; each check below filters this list.
# (Originally every assignment was bound to one throwaway name while the checks
# read `filepaths`, `upper_files`, ..., `bad_files` — all NameErrors.)
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

# Files whose names contain spaces.
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

# Files that sit at the repository root (no directory separator in the path).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

# Exit with the offending-file count so CI fails when any check trips.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
from __future__ import annotations
import requests
# Fields of a Reddit post's JSON payload that callers may request via `wanted_data`.
# (Renamed from a placeholder: the function below reads this as `valid_terms`.)
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from ``reddit.com/r/<subreddit>/<age>.json``.

    The original def declared the same parameter name four times (a SyntaxError),
    rebuilt its result dict on every loop iteration, and returned an undefined
    name; this restores the signature that the ``__main__`` guard below calls.

    Args:
        subreddit: subreddit name, e.g. ``"learnpython"``.
        limit: number of posts to fetch.
        age: listing sort order (``"new"``, ``"hot"``, ...).
        wanted_data: optional subset of post fields (must be in ``valid_terms``).

    Returns:
        Mapping of post index to either the raw child payload (when no fields
        were requested) or a dict of just the requested fields.

    Raises:
        ValueError: if ``wanted_data`` contains an unknown field.
        requests.HTTPError: when reddit rate-limits the request (HTTP 429).
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"""Invalid search term: {invalid_search_terms}"""
        raise ValueError(msg)
    response = requests.get(
        f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""",
        headers={'''User-agent''': '''A random string'''},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    # NOTE(review): running this as a script performs a live HTTP request to reddit.com.
    print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted configuration files.
# (Originally both the logger and this map were bound to the same placeholder
# name, so the map silently clobbered the logger.)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( PretrainedConfig ):
    """Configuration class for XLM-RoBERTa models.

    The original class inherited from an undefined name and declared every
    ``__init__`` parameter as ``a`` (a SyntaxError) while the body read the
    conventional hyper-parameter names; this restores the standard
    ``PretrainedConfig`` signature (defaults per the base xlm-roberta checkpoint).
    """

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( OnnxConfig ):
    # NOTE(review): this class re-uses the name `A_` and therefore shadows the
    # config class defined above; consider giving the two classes distinct names
    # (e.g. XLMRobertaConfig / XLMRobertaOnnxConfig).
    """ONNX export configuration: declares the model's dynamic input axes.

    The original inherited from an undefined name and gave the property a name
    that would never override ``OnnxConfig.inputs``; both are fixed here.
    """

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice models carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowercase__ :
    # NOTE(review): throughout this helper, assignments bind a placeholder name
    # (_lowerCamelCase) while later lines read the intended attributes/locals
    # (self.parent, self.batch_size, config_and_inputs, ...). The annotated
    # tuple-unpacking form `_lowerCamelCase , _lowerCamelCase : T = ...` is also
    # not valid Python (only a single target may be annotated). This tester
    # cannot run as written — restore the original bindings before use.
    def __init__( self , SCREAMING_SNAKE_CASE , ) -> Dict:
        # Fixture hyper-parameters for a tiny TransfoXL model used by the tests below.
        _lowerCamelCase : Union[str, Any] = parent
        _lowerCamelCase : str = 13
        _lowerCamelCase : Union[str, Any] = 7
        _lowerCamelCase : Optional[int] = 30
        _lowerCamelCase : Optional[int] = self.seq_length + self.mem_len
        _lowerCamelCase : Dict = 15
        _lowerCamelCase : int = True
        _lowerCamelCase : Union[str, Any] = True
        _lowerCamelCase : List[Any] = 99
        _lowerCamelCase : Tuple = [10, 50, 80]
        _lowerCamelCase : Tuple = 32
        _lowerCamelCase : int = 32
        _lowerCamelCase : Optional[Any] = 4
        _lowerCamelCase : Optional[Any] = 8
        _lowerCamelCase : List[Any] = 128
        _lowerCamelCase : Dict = 2
        _lowerCamelCase : str = 2
        _lowerCamelCase : Any = None
        _lowerCamelCase : Union[str, Any] = 1
        _lowerCamelCase : int = 0
        _lowerCamelCase : Tuple = 3
        _lowerCamelCase : str = self.vocab_size - 1
        _lowerCamelCase : str = 0.01
    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Build random input ids (and optional labels) plus a matching TransfoXLConfig.
        _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _lowerCamelCase : Dict = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _lowerCamelCase : Optional[Any] = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_a, lm_labels)
    def UpperCamelCase_ ( self) -> int:
        # Re-seed Python and TF RNGs for deterministic model initialization.
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any:
        # Base-model check: hidden-state and memory shapes for two forward passes.
        _lowerCamelCase : str = TFTransfoXLModel(SCREAMING_SNAKE_CASE)
        _lowerCamelCase , _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE).to_tuple()
        _lowerCamelCase : int = {"""input_ids""": input_ids_a, """mems""": mems_a}
        _lowerCamelCase , _lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Tuple:
        # LM-head check: logits and memory shapes with/without labels and cached mems.
        _lowerCamelCase : str = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE)
        _lowerCamelCase , _lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE).to_tuple()
        _lowerCamelCase : Tuple = {"""input_ids""": input_ids_a, """labels""": lm_labels}
        _lowerCamelCase , _lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE).to_tuple()
        _lowerCamelCase , _lowerCamelCase : int = model([input_ids_a, mems_a]).to_tuple()
        _lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
        _lowerCamelCase , _lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]:
        # Sequence-classification check: logits shape only.
        _lowerCamelCase : Tuple = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def UpperCamelCase_ ( self) -> List[Any]:
        # Convert prepare_config_and_inputs() output into the common inputs-dict form.
        _lowerCamelCase : str = self.prepare_config_and_inputs()
        ((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) : int = config_and_inputs
        _lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids_a}
        return config, inputs_dict
@require_tf
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
    # NOTE(review): the bases `A_ , A_` are not defined in this module; the imports
    # above suggest TFModelTesterMixin and PipelineTesterMixin were intended.
    # NOTE(review): every class attribute below is assigned to the same name
    # `__UpperCAmelCase`, so only the last assignment survives; the intended names
    # (all_model_classes, pipeline_model_mapping, test_* flags) are lost.
    __UpperCAmelCase = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    __UpperCAmelCase = () if is_tf_available() else ()
    __UpperCAmelCase = (
        {
            '''feature-extraction''': TFTransfoXLModel,
            '''text-classification''': TFTransfoXLForSequenceClassification,
            '''text-generation''': TFTransfoXLLMHeadModel,
            '''zero-shot''': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[Any]:
        # Skip the text-generation pipeline test (no simple tokenizer for TransfoXLConfig).
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def UpperCamelCase_ ( self) -> Any:
        # Instantiate the model tester and the common config tester.
        _lowerCamelCase : List[str] = TFTransfoXLModelTester(self)
        _lowerCamelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , d_embed=37)
    def UpperCamelCase_ ( self) -> Any:
        self.config_tester.run_common_tests()
    def UpperCamelCase_ ( self) -> Optional[Any]:
        self.model_tester.set_seed()
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE)
    def UpperCamelCase_ ( self) -> Tuple:
        self.model_tester.set_seed()
        _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE)
    def UpperCamelCase_ ( self) -> List[Any]:
        _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE)
    def UpperCamelCase_ ( self) -> str:
        # Check embedding accessors: only sequence-classification models expose output
        # embeddings/bias here.
        _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Any = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE)
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                _lowerCamelCase : Union[str, Any] = model.get_output_embeddings()
                assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer)
                _lowerCamelCase : Tuple = model.get_bias()
                assert name is None
            else:
                _lowerCamelCase : Dict = model.get_output_embeddings()
                assert x is None
                _lowerCamelCase : List[str] = model.get_bias()
                assert name is None
    def UpperCamelCase_ ( self) -> Dict:
        # TODO JP: Make TransfoXL XLA compliant
        pass
    @slow
    def UpperCamelCase_ ( self) -> int:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Any = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE)
    @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""")
    def UpperCamelCase_ ( self) -> Any:
        pass
@require_tf
class lowercase__ ( unittest.TestCase ):
    # NOTE(review): this class re-uses the name `lowercase__` and therefore shadows
    # the test suite defined above; the two should have distinct names.
    @unittest.skip("""Skip test until #12651 is resolved.""")
    @slow
    def UpperCamelCase_ ( self) -> List[Any]:
        # Integration test (currently skipped): greedy generation from transfo-xl-wt103
        # must reproduce a known continuation of the Rasputin prompt token-for-token.
        _lowerCamelCase : Optional[int] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""")
        # fmt: off
        _lowerCamelCase : int = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa)  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        _lowerCamelCase : List[str] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        _lowerCamelCase : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_length=200 , do_sample=SCREAMING_SNAKE_CASE)
        self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE)
| 88 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make all torch/cuDNN ops deterministic so the pixel-value assertions below are stable.
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
    # NOTE(review): the first base name `__UpperCamelCase` is not defined in this
    # module; the otherwise-unused PipelineTesterMixin import above is presumably
    # the intended base.
    # NOTE(review): the class attributes all share the name `__snake_case` and the
    # methods (including the two @property accessors) all share `_snake_case`, so
    # each later definition shadows the previous one; assignments also bind
    # `__lowerCamelCase` while later lines read the intended names. This suite
    # cannot run as written.
    '''simple docstring'''
    __snake_case = ConsistencyModelPipeline
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    @property
    def _snake_case ( self: str ):
        # Tiny unconditional test UNet.
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def _snake_case ( self: Tuple ):
        # Tiny class-conditional test UNet.
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def _snake_case ( self: int , a: str=False ):
        # Assemble pipeline components with the CM multistep scheduler.
        if class_cond:
            __lowerCamelCase : str = self.dummy_cond_unet
        else:
            __lowerCamelCase : str = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def _snake_case ( self: int , a: List[str] , a: Any=0 ):
        # Common pipeline inputs with a device-appropriate seeded generator.
        if str(a ).startswith('mps' ):
            __lowerCamelCase : List[Any] = torch.manual_seed(a )
        else:
            __lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _snake_case ( self: Optional[Any] ):
        # Multistep, unconditional: pins a 3x3 corner slice of the output image.
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components()
        __lowerCamelCase : str = ConsistencyModelPipeline(**a )
        __lowerCamelCase : str = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Any = self.get_dummy_inputs(a )
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # Multistep, class-conditional (class label 0): pins the same corner slice.
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
        __lowerCamelCase : Tuple = 0
        __lowerCamelCase : List[str] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        # One-step, unconditional (num_inference_steps=1, timesteps=None).
        __lowerCamelCase : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[int] = self.get_dummy_components()
        __lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Tuple = self.get_dummy_inputs(a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Any = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: List[str] ):
        # One-step, class-conditional (class label 0).
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    # NOTE(review): this class re-uses the name `A_` and therefore shadows the fast
    # test suite above; the two should have distinct names.
    # NOTE(review): as in the class above, every method is named `_snake_case`
    # (later definitions shadow earlier ones) and assignments bind
    # `__lowerCamelCase` while later lines read the intended names, so this suite
    # cannot run as written.
    '''simple docstring'''
    def _snake_case ( self: Any ):
        # Free GPU memory between slow tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # Inputs for the full-size imagenet64 checkpoint; optionally with fixed latents.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs
    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # Deterministic latents for a given seed/device/dtype/shape.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents
    def _snake_case ( self: str ):
        # Multistep CD (imagenet64, L2): pins a 3x3 corner slice.
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _snake_case ( self: Optional[int] ):
        # One-step CD (imagenet64, L2): pins a 3x3 corner slice.
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case ( self: List[str] ):
        # Multistep CD under torch 2.0 SDPA flash attention with fixed fp16 latents.
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case ( self: Dict ):
        # One-step CD under torch 2.0 SDPA flash attention with fixed fp16 latents.
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    """Config sanity checks specific to MobileNetV1.

    NOTE(review): the obfuscated source named this class `_lowerCamelCase`
    (colliding with two other classes in this file) with the undefined base
    `_a`; the test class's setUp references `MobileNetVaConfigTester`, and
    the file imports `ConfigTester`, so both names are restored. The method
    name is the hook `ConfigTester.run_common_tests` is expected to invoke.
    """

    def create_and_test_config_common_properties(self) -> Tuple:
        # Build a config from the tester's canned kwargs and verify the
        # MobileNetV1-specific attributes exist on it. (The obfuscated
        # version called hasattr on an undefined name.)
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV1 configs/inputs and runs shape checks for the tests.

    NOTE(review): the obfuscated source declared every __init__ parameter with
    the same name (a SyntaxError) and collapsed all method names; parameter
    and method names are reconstructed from the attribute assignments in the
    bodies and from the call sites in the test class below
    (`prepare_config_and_inputs`, `create_and_check_model`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.2_5,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=10_24,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.0_2,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The model's final channel count scales with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Check that the bare model emits a feature map of the expected shape."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Check logits shape of the image-classification head."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-model-test harness for MobileNetV1.

    NOTE(review): the obfuscated source listed the duplicate base `(_a, _a)`
    (a TypeError) and collapsed every method onto one name; bases are restored
    to the mixins imported at the top of this file, and method/attribute names
    to the identifiers the mixins require (`all_model_classes`, `setUp`,
    `test_*`). Names of helpers referenced inside bodies are taken from the
    call sites still visible in the obfuscated code.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    # MobileNetV1 has no attention, no token embeddings, no prunable heads.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> List[Any]:
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self) -> Any:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV1 does not use inputs_embeds')
    def test_inputs_embeds(self) -> Optional[int]:
        pass

    @unittest.skip(reason='MobileNetV1 does not support input and output embeddings')
    def test_model_common_attributes(self) -> Optional[Any]:
        pass

    @unittest.skip(reason='MobileNetV1 does not output attentions')
    def test_attention_outputs(self) -> Optional[int]:
        pass

    def test_forward_signature(self) -> Optional[Any]:
        """forward() must take `pixel_values` as its first argument."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self) -> List[Any]:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # 26 = number of feature stages exposed by the tiny test model.
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self) -> str:
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> Tuple:
    """Load the COCO test-fixture image used by the integration tests below.

    NOTE(review): restored the name `prepare_img`, which the integration test
    calls; the obfuscated version also bound the image to a throwaway local
    and returned the undefined name `image`.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
    """Slow integration test against the released google/mobilenet_v1_1.0_224 checkpoint.

    NOTE(review): the obfuscated source gave the property and the test method
    the same name (the property would be shadowed) and referenced the
    undefined name `lowerCamelCase` throughout; names are reconstructed from
    the uses still visible (`self.default_image_processor`, the asserted
    locals) and from unittest's `test_*` discovery convention.
    """

    @cached_property
    def default_image_processor(self) -> List[str]:
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self) -> int:
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits: 1001 ImageNet classes (incl. background).
        expected_shape = torch.Size((1, 10_01))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 89 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)

# Map from checkpoint identifier to the URL of its hosted config file.
lowercase_ = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __UpperCamelCase ):
    """Configuration for the TrOCR decoder.

    NOTE(review): reconstructed — the obfuscated source declared every
    __init__ parameter with the same name `a` (a SyntaxError), assigned the
    configuration values to dead locals instead of `self.*`, and bound all
    three class-level attributes to one colliding name. Parameter names are
    recovered from the right-hand sides still visible in the body; the
    class-attribute names follow the PretrainedConfig contract
    (`model_type`, `keys_to_ignore_at_inference`, `attribute_map`).
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # Standard config attribute names mapped onto TrOCR's decoder-side names.
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size=5_0265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.0_2,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
# Post attributes that may be requested via `wanted_data` — the fields present
# in Reddit's listing JSON.
# NOTE(review): restored the name `valid_terms`, which the fetch function
# below reads; the obfuscated version bound this set to an unused name.
valid_terms = set(
    '''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def _snake_case ( subreddit , limit = 1 , age = "new" , wanted_data = None ) -> dict:
    """Fetch up to `limit` posts of a subreddit via Reddit's public JSON API.

    NOTE(review): the obfuscated source declared all four parameters with the
    same name `A` (a SyntaxError) and wrote per-post dicts into a dead local;
    parameter names are recovered from their uses in the body.

    :param subreddit: subreddit name, e.g. ``learnpython``
    :param limit: number of posts to fetch
    :param age: listing to read — ``new``, ``hot``, ``top``, ...
    :param wanted_data: post fields to keep (must be members of `valid_terms`);
        when empty, the raw child entries are returned.
    :raises ValueError: if `wanted_data` contains an unknown field
    :raises requests.HTTPError: on HTTP 429 (rate limited)
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = F"""Invalid search term: {invalid_search_terms}"""
        raise ValueError(msg)
    response = requests.get(
        F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        # Keep only the requested fields of each post.
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    # NOTE(review): the obfuscated source called the undefined name
    # `get_subreddit_data`; the fetch function in this module is `_snake_case`.
    print(_snake_case('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
    """Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer.

    NOTE(review): reconstructed — the obfuscated source declared duplicate
    parameter names (a SyntaxError), bound the three class attributes to one
    colliding name, and dropped the pixel-values merge into a dead local.
    Class-attribute and method names follow the ProcessorMixin contract.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        # Accept the legacy `feature_extractor` kwarg as a fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; merge both when given."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Attach the vision features to the text encoding (the obfuscated
            # original dropped this value into a dead local).
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors )

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self):
        # De-duplicated union of the two sub-components' input names.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Base (always-importable) structure: configuration only. Each optional
# backend below appends its symbols when installed.
# NOTE(review): the obfuscated source rebound a single name `_lowercase` in
# every branch (clobbering the structure) while the _LazyModule call at the
# bottom reads `_import_structure`; the keyed assignments are reconstructed
# from the module names used in the TYPE_CHECKING imports below.
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand
    # (the dangling `import sys` in the obfuscated source implies this use).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast (CPU-sized) tests for the unconditional LDMPipeline.

    NOTE(review): reconstructed — the obfuscated source gave all four members
    one colliding name and referenced the undefined name `a`; the property
    names are recovered from the `self.dummy_uncond_unet` / `self.dummy_vq_model`
    uses still visible in the test body.
    """

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return unet

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0 )
        vq_model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return vq_model

    @property
    def dummy_text_encoder(self):
        # Unused by the test below; kept for parity with the other dummies.
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )

    def _snake_case ( self: List[str] ):
        """Tuple and dict outputs of the pipeline must match the reference slice."""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test against the released CompVis/ldm-celebahq-256 checkpoint."""

    def _snake_case ( self: Optional[int] ):
        # NOTE(review): the obfuscated source referenced the undefined name
        # `a`; values are reconstructed from the locals the asserts read.
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 | 0 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __SCREAMING_SNAKE_CASE :
    """Histogram equalization ("constant stretch") of a grayscale image via OpenCV.

    NOTE(review): reconstructed — the obfuscated source assigned all state to
    throwaway locals instead of instance attributes and gave the three public
    methods one colliding name. Attribute names are recovered from the
    `self.*` reads still visible in the bodies; method names match the
    `stretch` / `plot_histogram` / `show_image` calls in the __main__ block.
    """

    def __init__( self : Union[str, Any] ):
        self.img = ''''''
        self.original_image = ''''''
        # Equalized intensity lookup table, built by stretch().
        self.last_list = []
        self.rem = 0
        # Number of gray levels.
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch( self , input_file ):
        """Equalize `input_file` (read as grayscale) and write output_data/output.jpg."""
        self.img = cva.imread(input_file , 0 )
        self.original_image = copy.deepcopy(self.img )
        # x = per-bin pixel counts of the input histogram.
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        # Remap every pixel through the equalized lookup table.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''' , self.img )

    def plot_histogram( self : Dict ):
        """Plot the histogram of the (possibly equalized) image."""
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def show_image( self : str ):
        """Show input and output images for 5 seconds, then close the windows."""
        cva.imshow('''Output-Image''' , self.img )
        cva.imshow('''Input-Image''' , self.original_image )
        cva.waitKey(5000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): the obfuscated source bound both values to one name and
    # then referenced the undefined `file_path` / `stretcher` / `ConstantStretch`;
    # the names below match the class defined in this module.
    file_path = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
    stretcher = __SCREAMING_SNAKE_CASE()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 92 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# NOTE(review): restored the names `usage_doc` and `choice`, which later code
# in this module reads (`random.shuffle(choice)`, `raise Exception(usage_doc)`);
# the obfuscated version rebound one throwaway name for both.
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'

choice = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def create_canvas(canvas_size):
    """Return a canvas_size x canvas_size grid of dead cells (all False).

    NOTE(review): restored the name `create_canvas`, which the rest of this
    module calls; the obfuscated version also bound the grid to a throwaway
    local and returned the undefined name `canvas`.
    """
    canvas = [[False for _ in range(canvas_size)] for _ in range(canvas_size)]
    return canvas
def seed(canvas):
    """Randomize every cell of `canvas` in place with a fair coin flip.

    NOTE(review): restored the name `seed`, which the __main__ block calls;
    the obfuscated version enumerated the canvas instead of each row and
    dropped the random bit into a dead local instead of writing it back.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas):
    """Advance the board one generation and return it as a list of lists.

    NOTE(review): restored the name `run`, which the __main__ block calls; the
    obfuscated version bound both arrays to throwaway names and then read the
    undefined `current_canvas` / `next_gen_canvas`.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # Each cell's fate depends on its 3x3 neighbourhood (numpy slicing
            # clips at the borders).
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt, neighbours):
    """Apply Conway's Game of Life rules to one cell.

    NOTE(review): restored the name `__judge_point`, which `run` calls; the
    obfuscated version declared both parameters with the same name (a
    SyntaxError). `neighbours` is the cell's 3x3 neighbourhood including the
    cell itself, which is why the focus point is subtracted below.

    :param pt: current state of the focus cell
    :param neighbours: iterable of rows of booleans around (and including) pt
    :return: the cell's state in the next generation
    """
    alive = 0
    dead = 0
    # Count dead and alive neighbours.
    for row in neighbours:
        for status in row:
            if status:
                alive += 1
            else:
                dead += 1
    # Remove the duplicate entry for the focus point itself.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # Rules of the game: survival with 2-3 neighbours, birth with exactly 3.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    # NOTE(review): local names restored from their uses below (`seed(c)`,
    # `fig.show()`, `cmap=cmap`, ...); the obfuscated version rebound one
    # throwaway name for each of them.
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# NOTE(review): the obfuscated source rebound one name `__A` for both the
# structure dict and the model list, while the _LazyModule call below reads
# `_import_structure`; the keyed assignment is reconstructed from the
# TYPE_CHECKING imports.
_import_structure = {
    """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
    """processing_mgp_str""": ["""MgpstrProcessor"""],
    """tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_mgp_str"""] = [
        """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MgpstrModel""",
        """MgpstrPreTrainedModel""",
        """MgpstrForSceneTextRecognition""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( __UpperCamelCase ):
    """Decoding granularities supported by the MGP-STR processor.

    NOTE(review): the obfuscated source bound all three members to one
    colliding name inside a class that shadowed nothing usable; names are
    restored to match the `DecodeType.CHARACTER` / `DecodeType.BPE` /
    `DecodeType.WORDPIECE` references used later in this module.
    """

    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""


# All supported decode formats, in the order the processor tries them.
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = ["""image_processor""", """char_tokenizer"""]
__snake_case = """ViTImageProcessor"""
__snake_case = """MgpstrTokenizer"""
def __init__( self: int , a: Dict=None , a: Optional[int]=None , **a: List[str] ):
__lowerCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
__lowerCamelCase : Optional[Any] = kwargs.pop('feature_extractor' )
__lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
__lowerCamelCase : Any = tokenizer
__lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('gpt2' )
__lowerCamelCase : int = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(a , a )
def __call__( self: Optional[int] , a: Optional[int]=None , a: List[Any]=None , a: int=None , **a: str ):
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCamelCase : Dict = self.image_processor(a , return_tensors=a , **a )
if text is not None:
__lowerCamelCase : Dict = self.char_tokenizer(a , return_tensors=a , **a )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCamelCase : List[str] = encodings['input_ids']
return inputs
def _snake_case ( self: List[str] , a: List[Any] ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = sequences
__lowerCamelCase : List[str] = char_preds.size(0 )
__lowerCamelCase , __lowerCamelCase : str = self._decode_helper(a , 'char' )
__lowerCamelCase , __lowerCamelCase : Optional[int] = self._decode_helper(a , 'bpe' )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self._decode_helper(a , 'wp' )
__lowerCamelCase : Tuple = []
__lowerCamelCase : List[Any] = []
for i in range(a ):
__lowerCamelCase : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
__lowerCamelCase : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
__lowerCamelCase : Any = scores.index(max(a ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__lowerCamelCase : List[str] = {}
__lowerCamelCase : Optional[int] = final_strs
__lowerCamelCase : Dict = final_scores
__lowerCamelCase : Dict = char_strs
__lowerCamelCase : List[Any] = bpe_strs
__lowerCamelCase : Tuple = wp_strs
return out
def _snake_case ( self: int , a: Optional[int] , a: Optional[Any] ):
if format == DecodeType.CHARACTER:
__lowerCamelCase : Optional[Any] = self.char_decode
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : List[str] = '[s]'
elif format == DecodeType.BPE:
__lowerCamelCase : Dict = self.bpe_decode
__lowerCamelCase : List[str] = 2
__lowerCamelCase : Any = '#'
elif format == DecodeType.WORDPIECE:
__lowerCamelCase : List[str] = self.wp_decode
__lowerCamelCase : int = 102
__lowerCamelCase : Dict = '[SEP]'
else:
raise ValueError(F'Format {format} is not supported.' )
__lowerCamelCase , __lowerCamelCase : int = [], []
__lowerCamelCase : Tuple = pred_logits.size(0 )
__lowerCamelCase : List[Any] = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase : Dict = pred_logits.topk(1 , dim=-1 , largest=a , sorted=a )
__lowerCamelCase : List[str] = preds_index.view(-1 , a )[:, 1:]
__lowerCamelCase : Dict = decoder(a )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = torch.nn.functional.softmax(a , dim=2 ).max(dim=2 )
__lowerCamelCase : List[str] = preds_max_prob[:, 1:]
for index in range(a ):
__lowerCamelCase : str = preds_str[index].find(a )
__lowerCamelCase : Tuple = preds_str[index][:pred_eos]
__lowerCamelCase : Any = preds_index[index].cpu().tolist()
__lowerCamelCase : Any = pred_index.index(a ) if eos_token in pred_index else -1
__lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(a )
conf_scores.append(a )
return dec_strs, conf_scores
def char_decode(self, sequences):
    """Decode character-tokenizer id sequences into strings, stripping the
    spaces the tokenizer inserts between characters.

    Named ``char_decode`` because ``_decode_helper`` dispatches to
    ``self.char_decode`` for ``DecodeType.CHARACTER``.
    """
    decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
    return decode_strs
def bpe_decode(self, sequences):
    """Decode BPE-tokenizer id sequences into strings.

    Named ``bpe_decode`` because ``_decode_helper`` dispatches to
    ``self.bpe_decode`` for ``DecodeType.BPE``.
    """
    return self.bpe_tokenizer.batch_decode(sequences)
def wp_decode(self, sequences):
    """Decode wordpiece-tokenizer id sequences into strings, stripping the
    spaces inserted between pieces.

    Named ``wp_decode`` because ``_decode_helper`` dispatches to
    ``self.wp_decode`` for ``DecodeType.WORDPIECE``.
    """
    decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
    return decode_strs
| 669 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the small SentencePiece fixture model used by the tokenizer tests below.
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ``XLMProphetNetTokenizer``."""

    # Attribute names are fixed by TokenizerTesterMixin.
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing.
        tokenizer = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '[PAD]'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '[PAD]')
        self.assertEqual(vocab_keys[1], '[CLS]')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ], )

        # Out-of-vocab pieces round-trip back as "[UNK]".
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '[UNK]',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '[UNK]',
                '.',
            ], )

    @cached_property
    def big_tokenizer(self):
        # Full pretrained tokenizer for the @slow integration tests.
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [3_5389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/xprophetnet-large-wiki100-cased', revision='1acad1643ddd54a44df6a1b797ada8373685d90e', )
| 94 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map slow-tokenizer class name -> corresponding fast tokenizer class.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to fast ``tokenizer.json`` files.

    Args:
        tokenizer_name: name of a tokenizer class in ``TOKENIZER_CLASSES`` or
            ``None`` to convert all of them.
        checkpoint_name: a single checkpoint to convert, or ``None`` for every
            canonical checkpoint of each tokenizer class.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints even if cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        # NOTE: tokenizer_class is a class, so use __name__ (not __class__.__name__,
        # which would always print "type").
        logger.info(f'For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'=> File names {file_names}')

            # Keep only the fast tokenizer.json; remove the slow-format files.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCamelCase_(unittest.TestCase):
    """Integration tests for the BetterTransformer conversion (via optimum)."""

    def test_transform_and_reverse(self):
        # Round-trip: to_bettertransformer -> generate -> reverse -> save/reload,
        # checking the generated ids survive the round trip.
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # Saving a transformed model must fail until it has been reversed.
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 95 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece fixture (no BOS token) shared by the Pegasus tests below.
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ``PegasusTokenizer`` (slow and fast)."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing.
        tokenizer = PegasusTokenizer(lowercase_)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        # Slow and fast tokenizers must agree on mask/unk handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the BigBird-Pegasus flavour of ``PegasusTokenizer``.

    Renamed from the colliding ``A_`` so it no longer shadows the Pegasus test
    class above (only one of the two would otherwise be collected).
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing.
        tokenizer = PegasusTokenizer(lowercase_, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        # Expected ids come from the original TF implementation.
        original_input = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(original_input).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1], )
| 669 | 0 |
"""simple docstring"""
def a ( __UpperCAmelCase : int = 2_0_0_0_0_0_0 ) -> int:
__magic_name__: str = [0 for i in range(n + 1 )]
__magic_name__: Tuple = 1
__magic_name__: Dict = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , __UpperCAmelCase ):
__magic_name__: Optional[int] = 1
__magic_name__: Any = 0
for i in range(__UpperCAmelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
| 96 |
def UpperCamelCase__(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid, sqrt(bulk_modulus / density).

    The original signature declared the same name for both parameters, which
    is a SyntaxError in Python; the names are restored from the body's usage.

    Args:
        density: fluid density in kg/m^3; must be positive.
        bulk_modulus: bulk modulus in Pa; must be positive.

    Raises:
        ValueError: if either argument is non-positive.

    >>> UpperCamelCase__(1, 4)
    2.0
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 669 | 0 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits using Brian Kernighan's trick: n &= n - 1 clears the
    lowest set bit, so the loop runs once per set bit.

    The name matches what the benchmark below invokes via ``timeit``; the body
    already used ``number``, so the parameter is named accordingly.

    Raises:
        ValueError: if *number* is negative.
    """
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the low bit (``% 2``) and shifting right.

    The name matches what the benchmark below invokes via ``timeit``.

    Raises:
        ValueError: if *number* is negative.
    """
    if number < 0:
        raise ValueError('''the value of input must not be negative''')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Benchmark both bit-counting implementations for a few sample inputs.

    Fixes: the ``timeit`` statements hard-coded the value 25 while claiming to
    benchmark each ``number``, and the setup string was never passed through.
    """

    def do_benchmark(number: int) -> None:
        setup = '''import __main__ as z'''
        print(f'''Benchmark when {number = }:''')
        print(f'''{get_set_bits_count_using_modulo_operator(number) = }''')
        timing = timeit(f'''z.get_set_bits_count_using_modulo_operator({number})''', setup=setup)
        print(f'''timeit() runs in {timing} seconds''')
        print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(number) = }''')
        timing = timeit(
            f'''z.get_set_bits_count_using_brian_kernighans_algorithm({number})''', setup=setup, )
        print(f'''timeit() runs in {timing} seconds''')

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 97 |
def count_divisors(n: int) -> int:
    """Return the number of divisors of *n* via prime factorisation.

    d(n) = product over prime factors p^m of (m + 1). The trailing ``n > 1``
    check accounts for one remaining prime factor larger than sqrt(n).
    The name matches the call in ``solution`` below; the body already used
    ``n``, so the parameter is named accordingly.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def UpperCamelCase__ ( ):
__lowerCamelCase : str = 1
__lowerCamelCase : List[str] = 1
while True:
i += 1
t_num += i
if count_divisors(SCREAMING_SNAKE_CASE__ ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
| 669 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ``OpenAIGPTTokenizer`` / ``OpenAIGPTTokenizerFast``."""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']

        # Stored on self: test_full_tokenizer reads these paths back.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, '''w''') as fp:
            fp.write('''\n'''.join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        # OpenAI GPT has no pad token, so padding requests must raise.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''')

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''')

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''', )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''')

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''')

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''', )

    def test_padding_different_model_input_name(self):
        # Intentionally disabled: tokenizer has no padding token.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(__lowerCAmelCase):
    """Re-run the OpenAI GPT tokenization tests with ftfy/spacy installed, so
    the tokenizer exercises its ftfy/spacy text-cleaning code path.

    Renamed from the colliding ``__lowerCAmelCase`` and re-based on the test
    class above (its previous base ``__magic_name__`` was undefined).
    """

    pass
| 98 |
import numpy as np
class Cell:
    """A single grid node for A*: position, parent link, and g/h/f costs.

    Renamed from ``A_``: ``Gridworld.get_neigbours`` and the ``__main__`` block
    below instantiate ``Cell()`` by this name. Attribute assignments are
    restored to ``self`` (they previously bound throwaway locals, leaving
    instances without the ``position`` that ``__eq__`` reads).
    """

    def __init__(self):
        self.position = (0, 0)  # (x, y) coordinates in the grid
        self.parent = None      # back-pointer used to reconstruct the path
        self.g = 0              # cost from start
        self.h = 0              # heuristic estimate to goal
        self.f = 0              # g + h

    def __eq__(self, cell):
        # Two cells denote the same node when they share a position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A 2-D grid of zeros; supplies the 8-connected neighbours of a cell.

    Renamed from ``A_``: the ``__main__`` block below instantiates
    ``Gridworld()`` by this name. Attribute assignments are restored to
    ``self`` (they previously bound throwaway locals).
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)        # the grid itself
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-neighbourhood of *cell*, parented to it."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from *start* to *goal* on *world* (a ``Gridworld``).

    Renamed from the obfuscated name: the ``__main__`` block calls ``astar``.
    The cost/heuristic updates are restored to the neighbour's attributes
    (``n.g``/``n.h``/``n.f``); they previously bound throwaway locals, so no
    node ever received a score.

    Returns:
        The path as a list of (x, y) positions from start to goal.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # NOTE(review): these membership loops end in ``continue`` and are
            # effectively no-ops in the upstream algorithm; preserved as-is.
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared-distance heuristic
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    # Walk the parent chain back from the goal to reconstruct the path.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
lowercase_ = Gridworld()
# Start position and goal
lowercase_ = Cell()
lowercase_ = (0, 0)
lowercase_ = Cell()
lowercase_ = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
lowercase_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
lowercase_ = 1
print(world.w)
| 669 | 0 |
def a(input_str: str) -> str:
    """Return the words of *input_str* in reverse order.

    The parameter is renamed to ``input_str``, the name the body already used
    (the previous signature left it undefined).

    >>> a("I am a Geek")
    'Geek a am I'
    >>> a("")
    ''
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 99 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the Gregorian Easter date for the given year via Gauss's algorithm.

    Args:
        SCREAMING_SNAKE_CASE__: the year (int).

    Returns:
        ``datetime`` of Easter Sunday for that year.

    Bug fix: ``leap_day_reinstall_number`` (Gauss's ``q``) must be
    ``floor(k / 4)``; using true division yields fractional intermediate
    values and a wrong date for years whose century is not divisible by 4
    (e.g. 1994 came out as March 28 instead of April 3).
    """
    year = SCREAMING_SNAKE_CASE__
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = math.floor(leap_day_inhibits / 4 )
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    # Two historical exceptions of the Gauss computus.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # Demo: print Easter dates for a handful of years, in past or future tense.
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(F"""Easter in {year} {tense} {UpperCamelCase__(year)}""")
| 669 | 0 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __snake_case ( lowerCAmelCase_ ) -> str:
    """Return a sha256 hex digest of the given source lines.

    Comments (``#...``) and blank lines are stripped first, so purely
    cosmetic edits do not change the hash.

    Args:
        lowerCAmelCase_: iterable of source-code lines (strings).
    """
    filtered_lines = []
    for line in lowerCAmelCase_:
        line = re.sub(r'''#.*''' , '''''' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '''\n'''.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''' )
    return shaaaa(full_bytes ).hexdigest()
# get importable module names and hash for caching
# NOTE(review): these constants were all assigned to a single name ``_A``
# while later statements referenced ``_EXTENSION_TO_MODULE`` and
# ``_MODULE_TO_EXTENSIONS``; the canonical names are restored.
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# Modules whose loaders support per-file metadata — presumably consulted by
# the builder factory; TODO confirm against callers.
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 100 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving (VP) SDE scheduler, after Song et al.'s score_sde.

    NOTE(review): the obfuscated original had undefined base-class names,
    duplicate __init__ parameter names (a SyntaxError), two methods sharing
    one name, and never stored anything on ``self``; the canonical diffusers
    interface (``set_timesteps`` / ``step_pred``) is restored.
    """

    # Solver order of this scheduler.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # Populated lazily; set_timesteps must be called before step_pred.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the continuous time grid from 1 down to ``sampling_eps``."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Run one reverse-SDE predictor step.

        Returns:
            Tuple ``(x, x_mean)``: the noisy next sample and its mean.
        """
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: divide by the marginal std of the VP SDE
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute drift and diffusion of the reverse VP SDE
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 669 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowercase (PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for DanceDiffusionPipeline with a tiny UNet.

    NOTE(review): the obfuscated original assigned every class attribute to
    ``_UpperCAmelCase`` and named every method ``UpperCamelCase__`` (each
    definition shadowing the previous one); the PipelineTesterMixin contract
    names are restored.
    """

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # Attention slicing / CPU offload are not supported by this 1D pipeline.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny 1D UNet and IPNDM scheduler for fast tests."""
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0 ):
        """Deterministic generation kwargs; generator is seeded per device type."""
        if str(device ).startswith('mps' ):
            # MPS does not support device-local generators.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs

    def test_dance_diffusion(self):
        """End-to-end smoke test: output shape and a golden audio slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowercase_integration (unittest.TestCase ):
    """Slow GPU integration tests against the harmonai/maestro-150k checkpoint.

    NOTE(review): renamed from ``__lowercase`` — the module previously defined
    two classes with that name, so the fast test class above was shadowed.
    All methods also shared one obfuscated name; canonical names restored.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        """fp32 generation: shape and golden slice."""
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    def test_dance_diffusion_fp16(self):
        """fp16 generation: shape and golden slice."""
        device = torch_device
        # NOTE(review): the original referenced nonexistent ``torch.floataa``;
        # fp16 inference uses torch.float16.
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 101 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the first n Hamming numbers (2^i * 3^j * 5^k), in ascending order.

    Args:
        SCREAMING_SNAKE_CASE__: how many terms to generate (must be >= 1).

    Raises:
        ValueError: if the requested count is smaller than 1.
    """
    n_element = int(SCREAMING_SNAKE_CASE__ )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    # i/j/k track the smallest element whose multiple by 2/3/5 is still unused.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive demo: read n and print the first n Hamming numbers.
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = UpperCamelCase__(int(n))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 0 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible (SCREAMING_SNAKE_CASE ):
    """Return True if the 0-9 pandigital digit tuple satisfies Project Euler
    43's substring-divisibility property (d2d3d4 % 2, d3d4d5 % 3, ...,
    d8d9d10 % 17 all zero).

    NOTE(review): renamed — the module previously defined two functions named
    ``UpperCamelCase``, so this predicate was shadowed and unreachable.
    """
    num = SCREAMING_SNAKE_CASE
    # d2d3d4 divisible by 2 <=> d4 even.
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3 <=> digit sum divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 <=> d6 in {0, 5}.
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def UpperCamelCase (SCREAMING_SNAKE_CASE = 10 ):
    """Sum every pandigital number over digits 0..n-1 that has the
    substring-divisibility property (Project Euler problem 43)."""
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(SCREAMING_SNAKE_CASE ) )
        if is_substring_divisible(num ) )


if __name__ == "__main__":
    print(f'''{UpperCamelCase() = }''')
| 102 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
    """Unit tests for ``knapsack.greedy_knapsack.calc_profit``.

    NOTE(review): all six methods previously shared the name ``_snake_case``
    (each shadowing the previous), and the exception class argument was the
    undefined name ``a``; canonical test names and ValueError/IndexError
    restored.
    """

    def test_sorted(self):
        # Known greedy-knapsack instance with expected profit 210.
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )

    # NOTE(review): assertRaisesRegex without a callable only returns an
    # (unused) context manager, so the checks below are vacuous unless
    # wrapped around an actual kp.calc_profit call — kept as upstream.
    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )

    def test_unequal_list_length(self):
        # presumably raised as IndexError by calc_profit — TODO confirm in kp
        self.assertRaisesRegex(
            IndexError , 'The length of profit and weight must be same.' )


if __name__ == "__main__":
    unittest.main()
| 669 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SchedulerCommonTest ):
    """Tests for DDPMScheduler on top of the shared SchedulerCommonTest harness.

    NOTE(review): the obfuscated original had an undefined base-class name and
    gave every method the same name (so only the last survived), and method
    bodies referenced never-assigned locals; canonical names are restored.
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default DDPM config; keyword overrides are applied on top."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding(self):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        # Golden variance values at t = 0 / 487 / 999 for the linear schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            residual = model(sample , t )
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        # NOTE(review): msg text (with its odd trailing brace) kept byte-identical.
        with self.assertRaises(
            ValueError , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 103 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    """Builds tiny LayoutLMv3 configs and dummy inputs for the TF model tests.

    NOTE(review): renamed from ``A_`` — the companion test class instantiates
    ``TFLayoutLMvaModelTester``. The original __init__ also repeated the
    parameter name ``a`` for every argument (a SyntaxError); parameter names
    are restored from the attribute-assignment order.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return a tiny config plus random ids/bboxes/pixels/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal: swap coords so x1 <= x2 and y1 <= y2.
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        """Forward the base model with text+image, text-only and image-only inputs."""
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({'pixel_values': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common test-suite wiring for the TF LayoutLMv3 models.

    NOTE(review): this file has been run through an identifier obfuscator:
    the five class attributes below were all renamed to ``__snake_case``
    (originally distinct names such as ``all_model_classes`` /
    ``pipeline_model_mapping`` / ``test_pruning`` ...), every method is named
    ``_snake_case`` (only the last definition survives on the class), local
    assignment targets were collapsed to ``__lowerCamelCase`` while later
    reads keep the original names (e.g. ``inputs_dict``), and several
    signatures declare the parameter ``a`` more than once, which is a
    SyntaxError. The code is preserved byte-for-byte here; restore the
    original names from upstream before relying on it.
    """
    # NOTE(review): five assignments to the same name — only the last binding wins.
    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False
    # Pipeline-skip hook: unconditionally skips every pipeline test case.
    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        return True
    # _prepare_for_class: expands inputs for multiple-choice models and adds
    # per-task label tensors when return_labels is set.
    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        if model_class in get_values(a ):
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    # setUp: builds the model tester and config tester used by the tests below.
    def _snake_case ( self: Tuple ):
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )
    def _snake_case ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()
    # Loss-computation test: checks the loss is identical whether inputs are
    # passed as kwargs, a dict, or a positional tuple.
    def _snake_case ( self: Union[str, Any] ):
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                __lowerCamelCase : Tuple = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    # Base-model shape test.
    def _snake_case ( self: List[str] ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )
    # Base-model test across the supported position-embedding types.
    def _snake_case ( self: int ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )
    # Sequence-classification head test.
    def _snake_case ( self: Dict ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )
    # Token-classification head test.
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )
    # Question-answering head test.
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )
    # Checkpoint-loading smoke test (first archive entry only).
    @slow
    def _snake_case ( self: int ):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
    """Load the COCO "two cats" fixture image used by the integration test below."""
    # BUG FIX: the original bound the image to a throwaway local
    # (``__lowerCamelCase``) and then returned the undefined name ``image``.
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# The integration test below calls this helper by its pre-obfuscation name
# ``prepare_img`` (see the `prepare_img()` call site); keep both names bound.
prepare_img = UpperCamelCase__
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test: run TF LayoutLMv3 base on a fixture image and
    compare the first hidden-state slice against reference values.

    BUG FIX: obfuscation had collapsed both method names to ``_snake_case``
    (so only one survived on the class), bound every result to a throwaway
    local, and replaced boolean arguments with the undefined name ``a``.
    Names are restored from the surviving reads (``self.default_image_processor``)
    and unittest's ``test_*`` discovery convention.
    """

    @cached_property
    def default_image_processor(self):
        # OCR is disabled because the test supplies its own words/boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the shape of the last hidden state and a reference slice
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 669 | 0 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Fallback stub used when the `vision` extra (PIL) is not installed, so
    # that attribute references like ``Image.open`` below do not raise
    # NameError at import time.
    class UpperCamelCase__ :
        """Stand-in for PIL's ``Image`` when vision support is unavailable.

        NOTE(review): obfuscation renamed this class — it originally shadowed
        ``Image`` and the static method was ``open``; the references below
        still expect those names.
        """
        @staticmethod
        def snake_case__ ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
            # Deliberate no-op placeholder.
            pass
def _lowerCamelCase ( UpperCAmelCase_ : Image ) -> str:
"""simple docstring"""
A__ = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Pipeline tests for depth estimation.

    NOTE(review): obfuscation damage — every method is named ``snake_case__``
    (only the last one survives on the class), several signatures repeat the
    parameter ``SCREAMING_SNAKE_CASE__`` (a SyntaxError), and local results
    are bound to the throwaway ``A__`` while later reads keep the original
    names (``depth_estimator``, ``dataset``, ``outputs``). Code kept
    byte-for-byte; restore the upstream names before relying on it.
    """
    # Originally ``model_mapping``.
    A__ : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    # get_test_pipeline: builds the pipeline plus example inputs.
    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
        A__ = DepthEstimationPipeline(model=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    # run_pipeline_test: exercises single inputs and batched mixed-mode images.
    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
        A__ = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , SCREAMING_SNAKE_CASE__ )
        import datasets
        A__ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        A__ = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ] )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
                {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , SCREAMING_SNAKE_CASE__ , )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF" )
    def snake_case__ ( self ) -> Dict:
        pass
    # Slow large-model check against Intel/dpt-large reference statistics.
    @slow
    @require_torch
    def snake_case__ ( self ) -> Optional[int]:
        A__ = "Intel/dpt-large"
        A__ = pipeline("depth-estimation" , model=SCREAMING_SNAKE_CASE__ )
        A__ = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
        A__ = hashimage(outputs["depth"] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_6_2 )
    @require_torch
    def snake_case__ ( self ) -> str:
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """CLIP tokenizer tests (slow vs fast parity, ftfy normalization, offsets).

    NOTE(review): obfuscation damage — the class attributes were all renamed
    to ``__snake_case`` (originally ``tokenizer_class`` / ``rust_tokenizer_class``
    / ``test_rust_tokenizer`` ...), every method is named ``_snake_case``
    (only the last one survives), local assignment targets were collapsed to
    ``__lowerCamelCase`` while later reads keep the original names
    (``vocab``, ``merges``, ``tokens``, ``tokenizer_s`` ...). Code is kept
    byte-for-byte; restore the upstream names before relying on it.
    """
    # NOTE(review): five assignments to the same name — only the last wins.
    __snake_case = CLIPTokenizer
    __snake_case = CLIPTokenizerFast
    __snake_case = True
    __snake_case = {}
    __snake_case = False
    # setUp: writes a tiny BPE vocab + merges file into a temp directory.
    def _snake_case ( self: Union[str, Any] ):
        super().setUp()
        # fmt: off
        __lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        __lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
        __lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        __lowerCamelCase : Tuple = {'unk_token': '<unk>'}
        __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(a ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(a ) )
    # get_tokenizer / get_rust_tokenizer factories.
    def _snake_case ( self: Tuple , **a: Union[str, Any] ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Union[str, Any] , **a: List[str] ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Optional[int] , a: List[Any] ):
        __lowerCamelCase : Tuple = 'lower newer'
        __lowerCamelCase : Tuple = 'lower newer'
        return input_text, output_text
    # Full-tokenizer round trip against the tiny vocab above.
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowerCamelCase : Optional[Any] = 'lower newer'
        __lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        __lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        __lowerCamelCase : int = tokens + [tokenizer.unk_token]
        __lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    # Slow (ftfy) vs fast tokenizer parity on tricky unicode input.
    @require_ftfy
    def _snake_case ( self: Union[str, Any] ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                __lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
                __lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                __lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
                __lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
                __lowerCamelCase : Any = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of space type
                __lowerCamelCase : List[Any] = [
                    '\u0009', # (horizontal tab, '\t')
                    '\u000B', # (vertical tab)
                    '\u000C', # (form feed)
                    '\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark):w
                    '\u200F', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    __lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
                    __lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of line break type
                __lowerCamelCase : str = [
                    '\u000A', # (line feed, '\n')
                    '\r\n', # (carriage return and line feed, '\r\n')
                    '\u000D', # (carriage return, '\r')
                    '\r', # (carriage return, '\r')
                    '\u000D', # (carriage return, '\r')
                    '\u2028', # (line separator)
                    '\u2029', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    __lowerCamelCase : Dict = tokenizer_s.tokenize(a )
                    __lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
    def _snake_case ( self: List[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                __lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
                __lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                __lowerCamelCase : List[Any] = F' {text}'
                __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
    def _snake_case ( self: str ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(a ) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.' ) )
    @require_ftfy
    def _snake_case ( self: Tuple ):
        super().test_tokenization_python_rust_equals()
    def _snake_case ( self: Tuple ):
        # CLIP always lower cases letters
        pass
| 669 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCAmelCase_ ( lowerCamelCase_ ):
    """Configuration for the M-CLIP text encoder (XLM-R tower + projection).

    NOTE(review): the base class was obfuscated to the undefined
    ``lowerCamelCase_`` — upstream it is ``transformers.PretrainedConfig``.
    """

    # Model-type identifier ("model_type" equivalent); the obfuscated
    # attribute name `__a` is kept for interface compatibility. The bogus
    # `Union[str, Any]` annotation was dropped (NameError: typing not imported).
    __a = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # BUG FIX: the original declared the same parameter name three times
        # (a SyntaxError) and bound the sizes to throwaway locals. Parameter
        # names are recovered from the body's reads, and the values are stored
        # on the config because the model class below reads
        # ``config.transformerDimensions`` and ``config.numDims``.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class lowerCAmelCase_ ( lowerCamelCase_ ):
    """M-CLIP text model: an XLM-RoBERTa tower whose pooled output is
    projected into the CLIP image-embedding space.

    NOTE(review): the base class was obfuscated to the undefined
    ``lowerCamelCase_`` — upstream it is ``transformers.PreTrainedModel``.
    """

    # NOTE(review): ``MCLIPConfig`` is the config class defined just above,
    # which obfuscation also renamed — restore one of the two names before use.
    __a = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        # BUG FIX: the original declared duplicate parameter names (a
        # SyntaxError) and bound the sub-modules to throwaway locals, although
        # the method below reads ``self.transformer`` and
        # ``self.LinearTransformation``.
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def snake_case(self, input_ids, attention_mask):
        """Encode tokens, mean-pool over non-padding positions, and project.

        Returns ``(projected_embeddings, token_embeddings)``.
        NOTE(review): this is almost certainly the model's ``forward``; the
        name was mangled by obfuscation — confirm against upstream.
        """
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Attention-mask-weighted mean over the sequence dimension.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
| 105 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab, where raw keypress
# handling is unavailable and the menu falls back to typed indices.
# NOTE(review): the menu class below reads this flag as ``in_colab``; this
# module-level name looks obfuscation-damaged — reconcile the two names.
lowercase_ = False
try:
    lowercase_ = _is_package_available('google.colab')
except ModuleNotFoundError:
    # google.colab is simply absent outside Colab; keep the default False.
    pass
@input.register
class A_ :
    """Interactive bullet menu: renders `choices`, lets the user move a cursor
    with arrow or digit keys, and returns the selected index on enter.

    BUG FIX: obfuscation had (a) bound the constructor state to throwaway
    locals even though every method reads ``self.position`` / ``self.choices``
    / ``self.prompt`` / ``self.arrow_char``, (b) collapsed all method names to
    ``_snake_case`` so only the last definition survived on the class, (c)
    declared duplicate parameter names (a SyntaxError), and (d) referenced the
    undefined ``a`` inside the digit-key registration comprehension. Names are
    restored from the surviving reads and the keymap registrations.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        # The mutable `[]` default is kept for interface compatibility;
        # __init__ never mutates it, so the shared default is harmless.
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            # Classic Windows consoles may lack the arrow glyph/ANSI colors.
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        """Write one choice label, green-highlighted on ANSI terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print one row, marking it with the arrow when it is the cursor row."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor `num_spaces` rows in `direction`, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return  # already at the bottom
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return  # already at the top
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        """Confirm the current row: park the cursor below the menu, return index."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    # BUG FIX: the comprehension variable must be `number` (it was the
    # undefined `a`), so that the digit keys 0-9 are all registered.
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump straight to the row whose digit key was pressed."""
        # `self.current_selection` is set by the @input.register framework.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return  # digit beyond the number of choices: ignore

    def run(self, default_choice: int = 0):
        """Render the menu and block until a choice is made; return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab cannot intercept raw keypresses; read a typed index.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before echoing the final choice.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 669 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCAmelCase__ :
    """Builds tiny OpenAI-GPT configs/inputs and runs shape checks for the
    test class below.

    BUG FIX: obfuscation had renamed every parameter to ``__UpperCamelCase``
    (duplicate parameter names are a SyntaxError), bound all state to the
    throwaway local ``A``, and collapsed all method names to
    ``__UpperCamelCase``. Names are reconstructed from the values read in the
    method bodies and from the call sites in the test class
    (``prepare_config_and_inputs``, ``create_and_check_*`` ...).
    NOTE(review): the class itself is read elsewhere as
    ``OpenAIGPTModelTester`` — restore that name as well.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # OpenAI-GPT has no dedicated pad token; the tests reuse the last vocab id.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, head_mask, token_type_ids, *labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last result is checked.
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard mixin-driven test suite for the OpenAI-GPT model family.

    BUG FIX: obfuscation had replaced the mixin bases with the undefined
    ``_lowerCamelCase`` (the real mixins are imported at the top of this
    file), renamed the class attributes the mixins require
    (``all_model_classes`` etc.) to ``A_``, and collapsed every method name
    to ``__UpperCamelCase`` so no ``test_*`` method could be discovered.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Expand inputs for the double-heads model, which needs MC-shaped labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        # NOTE(review): the tester class defined above was renamed by
        # obfuscation; it is the `OpenAIGPTModelTester` this line expects —
        # restore its original name.
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Loading only the first archive entry keeps the slow test bounded.
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: greedy generation from the original `openai-gpt`
    checkpoint must reproduce a fixed reference continuation.

    BUG FIX: obfuscation had bound every value to the throwaway local ``A``
    (the final assert reads ``output_ids``/``expected_output_ids``), replaced
    ``do_sample=False`` with the undefined ``__UpperCamelCase``, and removed
    the ``test_`` method name unittest discovery requires.
    """

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4_735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40_477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        # Greedy decoding keeps the reference deterministic.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = (CMStochasticIterativeScheduler,)
__snake_case = 10
def _snake_case ( self: Any , **a: Dict ):
__lowerCamelCase : Optional[Any] = {
'num_train_timesteps': 201,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
config.update(**a )
return config
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Any = 10
__lowerCamelCase : Any = self.get_scheduler_config()
__lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**a )
scheduler.set_timesteps(a )
__lowerCamelCase : Any = scheduler.timesteps[0]
__lowerCamelCase : List[str] = scheduler.timesteps[1]
__lowerCamelCase : Union[str, Any] = self.dummy_sample
__lowerCamelCase : int = 0.1 * sample
__lowerCamelCase : Optional[Any] = scheduler.step(a , a , a ).prev_sample
__lowerCamelCase : List[str] = scheduler.step(a , a , a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self: Optional[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a )
def _snake_case ( self: List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a )
def _snake_case ( self: Tuple ):
__lowerCamelCase : Tuple = self.scheduler_classes[0]
__lowerCamelCase : Tuple = self.get_scheduler_config()
__lowerCamelCase : Tuple = scheduler_class(**a )
__lowerCamelCase : int = 1
scheduler.set_timesteps(a )
__lowerCamelCase : Optional[int] = scheduler.timesteps
__lowerCamelCase : List[str] = torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] = self.dummy_model()
__lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a ):
# 1. scale model input
__lowerCamelCase : List[str] = scheduler.scale_model_input(a , a )
# 2. predict noise residual
__lowerCamelCase : Optional[int] = model(a , a )
# 3. predict previous sample x_t-1
__lowerCamelCase : str = scheduler.step(a , a , a , generator=a ).prev_sample
__lowerCamelCase : str = pred_prev_sample
__lowerCamelCase : List[str] = torch.sum(torch.abs(a ) )
__lowerCamelCase : str = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Any = self.scheduler_classes[0]
__lowerCamelCase : Optional[Any] = self.get_scheduler_config()
__lowerCamelCase : int = scheduler_class(**a )
__lowerCamelCase : List[Any] = [106, 0]
scheduler.set_timesteps(timesteps=a )
__lowerCamelCase : Dict = scheduler.timesteps
__lowerCamelCase : int = torch.manual_seed(0 )
__lowerCamelCase : Any = self.dummy_model()
__lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__lowerCamelCase : Tuple = scheduler.scale_model_input(a , a )
# 2. predict noise residual
__lowerCamelCase : Tuple = model(a , a )
# 3. predict previous sample x_t-1
__lowerCamelCase : Any = scheduler.step(a , a , a , generator=a ).prev_sample
__lowerCamelCase : Any = pred_prev_sample
__lowerCamelCase : Dict = torch.sum(torch.abs(a ) )
__lowerCamelCase : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3
def _snake_case ( self: Tuple ):
__lowerCamelCase : Optional[int] = self.scheduler_classes[0]
__lowerCamelCase : int = self.get_scheduler_config()
__lowerCamelCase : List[Any] = scheduler_class(**a )
__lowerCamelCase : Optional[Any] = [39, 30, 12, 15, 0]
with self.assertRaises(a , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a )
def _snake_case ( self: int ):
__lowerCamelCase : Any = self.scheduler_classes[0]
__lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
__lowerCamelCase : Union[str, Any] = scheduler_class(**a )
__lowerCamelCase : Optional[int] = [39, 30, 12, 1, 0]
__lowerCamelCase : List[Any] = len(a )
with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Optional[int] = self.scheduler_classes[0]
__lowerCamelCase : Dict = self.get_scheduler_config()
__lowerCamelCase : Union[str, Any] = scheduler_class(**a )
__lowerCamelCase : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=a )
| 669 | 0 |
'''simple docstring'''
import numpy as np
def _SCREAMING_SNAKE_CASE ( __snake_case : np.array ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: the package is ``bs4``, not ``bsa``

if __name__ == "__main__":
    # Ask for a page URL, scrape its og:image meta tag, and save the
    # referenced image to a timestamped file in the working directory.
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # NOTE(review): ':' in the timestamp is not a valid filename character on
    # Windows — confirm the target platform before changing the format.
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 669 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds tiny DeiT configurations and dummy inputs for the model tests below.

    NOTE(review): this file was mechanically renamed.  Method signatures
    repeat the parameter name ``lowerCamelCase`` (duplicate argument names
    are a SyntaxError), and locals are written to ``_UpperCAmelCase`` but
    read back under their original names (``parent``, ``batch_size``, ...).
    The code is kept byte-identical here; only documentation was added.
    """
    def __init__( self : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : str=13 , lowerCamelCase : Union[str, Any]=30 , lowerCamelCase : Optional[Any]=2 , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Any=True , lowerCamelCase : List[str]=32 , lowerCamelCase : Optional[int]=5 , lowerCamelCase : Any=4 , lowerCamelCase : Optional[int]=37 , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : List[str]=10 , lowerCamelCase : Any=0.02 , lowerCamelCase : List[Any]=3 , lowerCamelCase : Dict=None , lowerCamelCase : Tuple=2 , ) -> Any:
        """Record the (small) hyper-parameters every test will reuse."""
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = image_size
        _UpperCAmelCase = patch_size
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = is_training
        _UpperCAmelCase = use_labels
        _UpperCAmelCase = hidden_size
        _UpperCAmelCase = num_hidden_layers
        _UpperCAmelCase = num_attention_heads
        _UpperCAmelCase = intermediate_size
        _UpperCAmelCase = hidden_act
        _UpperCAmelCase = hidden_dropout_prob
        _UpperCAmelCase = attention_probs_dropout_prob
        _UpperCAmelCase = type_sequence_label_size
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = scope
        _UpperCAmelCase = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        _UpperCAmelCase = (image_size // patch_size) ** 2
        _UpperCAmelCase = num_patches + 2
    def lowerCamelCase ( self : Any ) -> int:
        """Create random pixel values (and labels, when enabled) plus a config."""
        _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _UpperCAmelCase = None
        if self.use_labels:
            _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        _UpperCAmelCase = self.get_config()
        return config, pixel_values, labels
    def lowerCamelCase ( self : int ) -> Optional[Any]:
        """Return a DeiTConfig wired with this tester's hyper-parameters."""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def lowerCamelCase ( self : Any , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ) -> Tuple:
        """Forward dummy inputs through DeiTModel and check the output shape."""
        _UpperCAmelCase = DeiTModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = model(lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase ( self : int , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] ) -> str:
        """Check DeiTForMaskedImageModeling reconstruction shapes (RGB and greyscale)."""
        _UpperCAmelCase = DeiTForMaskedImageModeling(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = model(lowerCamelCase )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        _UpperCAmelCase = 1
        _UpperCAmelCase = DeiTForMaskedImageModeling(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _UpperCAmelCase = model(lowerCamelCase )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def lowerCamelCase ( self : Any , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] ) -> str:
        """Check DeiTForImageClassification logits shapes (RGB and greyscale)."""
        _UpperCAmelCase = self.type_sequence_label_size
        _UpperCAmelCase = DeiTForImageClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        _UpperCAmelCase = 1
        _UpperCAmelCase = DeiTForImageClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _UpperCAmelCase = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCamelCase ( self : int ) -> Union[str, Any]:
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        _UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) ,
        ) = config_and_inputs
        _UpperCAmelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common model/pipeline test-suite run against the DeiT model family.

    NOTE(review): mechanically renamed code — several method signatures
    repeat the parameter name ``lowerCamelCase`` (a SyntaxError), every
    class attribute is named ``_lowerCamelCase`` (each assignment
    overwrites the previous one), and locals are stored in
    ``_UpperCAmelCase`` but read back under their original names.  The
    code is kept byte-identical; only documentation was added.
    """
    # All DeiT heads under test (torch only).
    _lowerCamelCase = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping used by the pipeline tests.
    _lowerCamelCase = (
        {
            '''feature-extraction''': DeiTModel,
            '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    def lowerCamelCase ( self : Dict ) -> Optional[int]:
        """Create the model tester and a config tester for DeiT."""
        _UpperCAmelCase = DeiTModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
    def lowerCamelCase ( self : Tuple ) -> Tuple:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def lowerCamelCase ( self : str ) -> Union[str, Any]:
        """Skipped: DeiT is vision-only and has no inputs_embeds."""
        pass
    def lowerCamelCase ( self : Dict ) -> Optional[Any]:
        """Input embeddings must be a module; output embeddings absent or linear."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(lowerCamelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _UpperCAmelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
    def lowerCamelCase ( self : Any ) -> Any:
        """forward() should take pixel_values as its first argument."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(lowerCamelCase )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    def lowerCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        """Shape-check the base model."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )
    def lowerCamelCase ( self : List[str] ) -> str:
        """Shape-check masked image modeling."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
    def lowerCamelCase ( self : Optional[Any] ) -> Optional[int]:
        """Shape-check image classification."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
    def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=False ) -> Optional[int]:
        """Drop labels for the teacher model, which is inference-only."""
        _UpperCAmelCase = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def lowerCamelCase ( self : List[str] ) -> Dict:
        """Each trainable head must produce a loss that backpropagates."""
        if not self.model_tester.is_training:
            return
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(lowerCamelCase )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            _UpperCAmelCase = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.train()
            _UpperCAmelCase = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
            _UpperCAmelCase = model(**lowerCamelCase ).loss
            loss.backward()
    def lowerCamelCase ( self : int ) -> Tuple:
        """Training with gradient checkpointing enabled must also backpropagate."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        _UpperCAmelCase = False
        _UpperCAmelCase = True
        for model_class in self.all_model_classes:
            if model_class in get_values(lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            _UpperCAmelCase = model_class(lowerCamelCase )
            model.gradient_checkpointing_enable()
            model.to(lowerCamelCase )
            model.train()
            _UpperCAmelCase = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
            _UpperCAmelCase = model(**lowerCamelCase ).loss
            loss.backward()
    def lowerCamelCase ( self : Dict ) -> int:
        """Classification heads must handle every supported problem_type without the broadcast warning."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(lowerCamelCase ),
                    *get_values(lowerCamelCase ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                # NOTE(review): nested double quotes inside an f-triple-double-quoted
                # string require Python >= 3.12 — confirm the supported runtime.
                with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
                    _UpperCAmelCase = problem_type["""title"""]
                    _UpperCAmelCase = problem_type["""num_labels"""]
                    _UpperCAmelCase = model_class(lowerCamelCase )
                    model.to(lowerCamelCase )
                    model.train()
                    _UpperCAmelCase = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
                    if problem_type["num_labels"] > 1:
                        _UpperCAmelCase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    _UpperCAmelCase = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=lowerCamelCase ) as warning_list:
                        _UpperCAmelCase = model(**lowerCamelCase ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
    @slow
    def lowerCamelCase ( self : Tuple ) -> int:
        """Smoke-test loading the first pretrained DeiT checkpoint."""
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = DeiTModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
    # Load the COCO test-fixture image used by the integration tests below.
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests that run real DeiT checkpoints.

    NOTE(review): mechanically renamed code — all three methods are named
    ``lowerCamelCase`` (later definitions shadow earlier ones, so only the
    last method survives on the class), the bodies pass the undefined
    module-level name ``lowerCamelCase`` where a device is expected, and
    ``self.default_image_processor`` has no matching definition.  The code
    is kept byte-identical; only documentation was added.
    """
    @cached_property
    def lowerCamelCase ( self : Any ) -> Any:
        """Image processor for the distilled base checkpoint (None without vision deps)."""
        return (
            DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCamelCase ( self : str ) -> Optional[Any]:
        """Verify classification logits of the distilled base checkpoint on the COCO fixture."""
        _UpperCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            lowerCamelCase )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
        # forward pass
        with torch.no_grad():
            _UpperCAmelCase = model(**lowerCamelCase )
        # verify the logits
        _UpperCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        _UpperCAmelCase = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
    @slow
    @require_accelerate
    @require_torch_gpu
    def lowerCamelCase ( self : Optional[int] ) -> Tuple:
        """Smoke-test fp16 inference with an accelerate device_map (forward pass only, no assertions)."""
        _UpperCAmelCase = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""pt""" )
        _UpperCAmelCase = inputs.pixel_values.to(lowerCamelCase )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _UpperCAmelCase = model(lowerCamelCase )
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Check every tracked file path against the repo's naming conventions
# (lowercase, no spaces, no hyphens, inside a directory); print offenders
# and exit with the number of bad files as the status code.
# The original (obfuscated) script bound every value to ``lowercase_`` but
# read the original names back, which raised NameError — names restored.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Names must be all-lowercase.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

# Names must not contain spaces.
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

# Names must use underscores, not hyphens.
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

# Every file must live inside a directory.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 669 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def __magic_name__ ( main_process_only=True , *args , **kwargs ):
    """Wrap ``tqdm.auto.tqdm`` so that, by default, only the main local process shows a bar.

    Args:
        main_process_only: When True, disable the progress bar on every
            process whose local index is not 0.
        *args, **kwargs: Forwarded unchanged to ``tqdm``.

    Raises:
        ImportError: If ``tqdm`` is not installed.

    The original (obfuscated) signature named all three parameters
    ``__UpperCAmelCase`` (a SyntaxError) and disabled the bar on the main
    process instead of the workers; both are fixed here following the
    upstream ``accelerate`` implementation.
    """
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # Disable everywhere except the local main process (index 0).
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
| 109 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase_ = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds ``lowercase_`` and discards the
# logger created above — upstream keeps two distinct names
# (``logger`` and ``XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP``).
# Canonical checkpoint name -> hosted config.json URL.
lowercase_ = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( __UpperCamelCase ):
    """Configuration class for XLM-RoBERTa models.

    The defaults reproduce a base-sized architecture; ``pad``/``bos``/``eos``
    token ids are forwarded to the parent config.

    The original (obfuscated) ``__init__`` named every parameter ``a``
    (duplicate argument names are a SyntaxError) while the body read the
    real names; the signature is restored from those attribute assignments.
    """

    # Model identifier used by the auto classes (obfuscated attribute name
    # kept so the rest of this file keeps working).
    __snake_case = """xlm-roberta"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( __UpperCamelCase ):
    """ONNX export configuration for XLM-RoBERTa."""

    @property
    def _snake_case ( self ):
        """Return the dynamic-axes mapping for the model's ONNX inputs."""
        # Multiple-choice inputs carry an extra "choice" axis.
        # The original (obfuscated) body assigned the dict to
        # ``__lowerCamelCase`` but read the undefined name ``dynamic_axis``
        # (NameError); the variable name is restored here.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it provides.
_snake_case : Tuple = {
    'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
    'configuration_data2vec_text': [
        'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecTextConfig',
        'Data2VecTextOnnxConfig',
    ],
    'configuration_data2vec_vision': [
        'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecVisionConfig',
        'Data2VecVisionOnnxConfig',
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the model-class lists below are bound to the throwaway
    # name ``_snake_case`` (rebound on every assignment) and are never
    # inserted into the import-structure dict above, so the lazy module
    # built at the bottom of the file would only expose the configuration
    # entries.  Upstream assigns each list to an
    # ``_import_structure['modeling_...']`` key — confirm intent.
    _snake_case : Tuple = [
        'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecAudioForAudioFrameClassification',
        'Data2VecAudioForCTC',
        'Data2VecAudioForSequenceClassification',
        'Data2VecAudioForXVector',
        'Data2VecAudioModel',
        'Data2VecAudioPreTrainedModel',
    ]
    _snake_case : Optional[int] = [
        'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecTextForCausalLM',
        'Data2VecTextForMaskedLM',
        'Data2VecTextForMultipleChoice',
        'Data2VecTextForQuestionAnswering',
        'Data2VecTextForSequenceClassification',
        'Data2VecTextForTokenClassification',
        'Data2VecTextModel',
        'Data2VecTextPreTrainedModel',
    ]
    _snake_case : int = [
        'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecVisionForImageClassification',
        'Data2VecVisionForMaskedImageModeling',
        'Data2VecVisionForSemanticSegmentation',
        'Data2VecVisionModel',
        'Data2VecVisionPreTrainedModel',
    ]
if is_tf_available():
    _snake_case : Union[str, Any] = [
        'TFData2VecVisionForImageClassification',
        'TFData2VecVisionForSemanticSegmentation',
        'TFData2VecVisionModel',
        'TFData2VecVisionPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module
    # below serves attribute access instead.
    # NOTE(review): the module names (``configuration_dataavec_audio``) and
    # class names (``DataaVecAudioConfig``) imported here do not match the
    # string keys/names declared in the import structure above
    # (``configuration_data2vec_audio`` / ``Data2VecAudioConfig``) — this
    # looks like mechanical renaming; verify against the real package layout.
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): ``_import_structure`` is not defined anywhere in this
    # file — the dict above was assigned to ``_snake_case`` — so this call
    # raises NameError at import time.  Upstream names the dict
    # ``_import_structure``.
    _snake_case : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic torch/cuDNN kernels so the hard-coded expected image
# slices in the tests below are reproducible across runs.
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Fast (CPU) tests for ConsistencyModelPipeline using tiny test UNets.

    NOTE(review): mechanically renamed code — every class attribute is
    named ``__snake_case`` (each assignment overwrites the previous one),
    ``get_dummy_inputs`` repeats the parameter name ``a`` (duplicate
    argument names are a SyntaxError), and locals are stored in
    ``__lowerCamelCase`` but read back under their original names
    (``unet``, ``scheduler``, ``generator``, ...).  The code is kept
    byte-identical; only documentation was added.
    """
    __snake_case = ConsistencyModelPipeline
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    @property
    def _snake_case ( self: str ):
        """Tiny unconditional test UNet."""
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def _snake_case ( self: Tuple ):
        """Tiny class-conditional test UNet."""
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def _snake_case ( self: int , a: str=False ):
        """Assemble pipeline components: a test UNet plus a CM multistep scheduler."""
        if class_cond:
            __lowerCamelCase : str = self.dummy_cond_unet
        else:
            __lowerCamelCase : str = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def _snake_case ( self: int , a: List[str] , a: Any=0 ):
        """Deterministic pipeline call kwargs (seeded generator, two explicit timesteps)."""
        if str(a ).startswith('mps' ):
            __lowerCamelCase : List[Any] = torch.manual_seed(a )
        else:
            __lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _snake_case ( self: Optional[Any] ):
        """Unconditional multistep sampling reproduces the expected image slice."""
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components()
        __lowerCamelCase : str = ConsistencyModelPipeline(**a )
        __lowerCamelCase : str = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Any = self.get_dummy_inputs(a )
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        """Class-conditional multistep sampling reproduces the expected image slice."""
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
        __lowerCamelCase : Tuple = 0
        __lowerCamelCase : List[str] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        """Unconditional one-step sampling reproduces the expected image slice."""
        __lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[int] = self.get_dummy_components()
        __lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Tuple = self.get_dummy_inputs(a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Any = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: List[str] ):
        """Class-conditional one-step sampling reproduces the expected image slice."""
        __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_(unittest.TestCase):
    """GPU integration tests for ConsistencyModelPipeline (imagenet64 L2 checkpoint).

    NOTE(review): the original block was mechanically mangled — every method
    was named ``_snake_case`` (so later defs shadowed earlier ones), several
    signatures had duplicate parameter names, and every local assignment
    targeted a placeholder.  ``get_inputs`` / ``get_fixed_latents`` are
    grounded by their call sites; the ``test_*`` names are reconstructed —
    confirm against the upstream diffusers test suite.  ``torch.floataa`` and
    ``torch_device`` are kept as-is from the mangled source.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.floataa, shape=(1, 3, 64, 64)):
        """Common pipeline kwargs; optionally pin the initial latents.

        NOTE(review): ``torch.floataa`` looks like a mangled dtype name
        (float16/float32) — confirm.
        """
        generator = torch.manual_seed(seed)
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs['latents'] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.floataa, shape=(1, 3, 64, 64)):
        """Deterministic latents so fp16/flash runs compare against fixed noise."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        # NOTE(review): `torch_device` presumably comes from the testing-utils
        # import at the top of the file (not visible here) — confirm.
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        # Single-step sampling: no explicit timestep schedule.
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device='cuda', torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device='cuda')
        # Ensure usage of flash attention in torch 2.0
        # NOTE(review): sdp_kernel flag values reconstructed — confirm upstream.
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device='cuda', torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device='cuda')
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 669 | 0 |
"""Repository hygiene checks: flag files whose paths contain uppercase
letters, spaces, or hyphens, or that sit outside any directory; exit with a
nonzero status when any offenders exist.

NOTE(review): variable names restored from the mangled original — every read
(`filepaths`, `upper_files`, `space_files`, `hyphen_files`, `nodir_files`,
`bad_files`) survives in the source and grounds the corresponding assignment.
"""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 229 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# NOTE(review): module-level names reconstructed (the mangled original bound
# both statements to the same placeholder); named per the transformers
# convention — confirm against upstream.
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_(PretrainedConfig):
    """Configuration class for the TrOCR decoder.

    NOTE(review): class-attribute names, parameter names and the `self.*`
    assignment targets are reconstructed from the mangled original — the
    default values, the `attribute_map` contents, and the `super().__init__`
    keyword reads ground the mapping; confirm against upstream `TrOCRConfig`.
    """

    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
                 decoder_ffn_dim=4096, activation_function='gelu', max_position_embeddings=512, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
                 decoder_layerdrop=0.0, use_cache=True, scale_embedding=False,
                 use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1,
                 bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs)
| 669 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs and inputs for the TF model tests.

    NOTE(review): reconstructed from a mechanically mangled original — the
    class name is grounded by the ``TFResNetModelTester(self)`` call site
    below in this file, method names by their call sites, and attributes by
    the ``self.*`` reads; confirm against the upstream transformers tests.
    """

    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act='relu', num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One ResNet stage per entry in `depths`.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act,
            num_labels=self.num_labels, image_size=self.image_size)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        # NOTE(review): the mangled original dropped the assignment target
        # here; upstream sets `config.num_labels` — confirm.
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model-test battery applied to TFResNet.

    NOTE(review): reconstructed from a mechanically mangled original — base
    classes are taken from the file's imports, method names from their call
    sites / unittest conventions, and class-attribute names from the
    ModelTesterMixin contract; confirm against the upstream transformers
    test suite.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ResNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ResNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # Embedding output plus one hidden state per stage.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration test below.

    NOTE(review): the function name is grounded by its `prepare_img()` call
    site later in this file; the mangled original named it `lowerCamelCase_`.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class _UpperCAmelCase(unittest.TestCase):
    """End-to-end inference check for TFResNetForImageClassification.

    NOTE(review): `default_image_processor` is grounded by its read below;
    the test-method name is reconstructed — confirm against upstream.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 506 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_(ProcessorMixin):
    """Processor wrapping a CLIP image processor and an XLM-RoBERTa tokenizer.

    NOTE(review): attribute/method names reconstructed from the ProcessorMixin
    conventions and the tokenizer methods they forward to, locals from their
    reads; confirm against the upstream transformers processor.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if 'feature_extractor' in kwargs:
            # Backwards compatibility: accept the deprecated kwarg but warn.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning)
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Merge pixel values into the text encoding when both are given.
            # NOTE(review): the mangled original dropped this assignment target.
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order while de-duplicating.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# NOTE(review): the assignment target was mangled to a placeholder; the
# upstream attend-and-excite test module sets
# `torch.backends.cuda.matmul.allow_tf32 = False` at this point — confirm.
_UpperCamelCase = False
@skip_mps
class SCREAMING_SNAKE_CASE_(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for StableDiffusionAttendAndExcitePipeline.

    NOTE(review): reconstructed from a mechanically mangled original — base
    classes are taken from the file's imports, class-attribute and local
    names from how they are read, and ``test_*`` names from the ``super()``
    calls they make; confirm against the upstream diffusers test suite.
    """

    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear',
            clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act='gelu', projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    """Slow GPU test comparing fp16 attend-and-excite output to a stored image.

    NOTE(review): method/local names reconstructed from the mangled original;
    ``torch.floataa`` is kept as-is (looks like a mangled torch.float16) —
    confirm against the upstream diffusers test suite.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.floataa)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator,
            num_inference_steps=5, max_iter_to_alter=5, output_type='numpy').images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        assert np.abs((expected_image - image).max()) < 5e-1
| 179 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_(unittest.TestCase):
    """Fast tests for the unconditional LDMPipeline.

    NOTE(review): property and local names reconstructed from the mangled
    original — ``dummy_uncond_unet`` / ``dummy_vq_model`` are grounded by
    their reads below; the text-encoder property and the test name are
    reconstructed — confirm against the upstream diffusers test suite.
    """

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'))
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3)
        return model

    @property
    def dummy_text_encoder(self):
        # NOTE(review): this property is not read in the visible file; name reconstructed.
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images
        # Re-seed so the tuple-return path produces the same sample.
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        # Looser tolerance on mps.
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class A_(unittest.TestCase):
    """Slow test for the pretrained celebahq-256 LDM checkpoint.

    NOTE(review): local and method names reconstructed from the mangled
    original — confirm against the upstream diffusers test suite.
    """

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        # Looser tolerance on mps.
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 669 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): module-level names reconstructed (the mangled original bound
# both statements to the same placeholder); named per the transformers
# convention — confirm against upstream.
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class __UpperCAmelCase(PretrainedConfig):
    """Configuration class for MarkupLM.

    NOTE(review): parameter names and `self.*` assignment targets are
    reconstructed from the mangled original — the default values and the
    `super().__init__` keyword reads ground the mapping; confirm against
    upstream `MarkupLMConfig`.
    """

    model_type = 'markuplm'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2,
                 max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216,
                 subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type='absolute',
                 use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 274 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Names grounded by their reads below (`usage_doc` in main, `choice` in shuffle).
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """Return a size x size grid with every cell dead (False).

    NOTE(review): the function name is grounded by the `create_canvas(...)`
    call sites later in this file; the mangled original named it
    `UpperCamelCase__`.
    """
    canvas = [[False for _ in range(size)] for _ in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    """Randomise every cell of *canvas* in place with a fair coin flip.

    NOTE(review): the function name is grounded by the `seed(c)` call site
    in the `__main__` block below.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            row[j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the board one generation and return it as nested lists.

    NOTE(review): the function name is grounded by the `run(c)` call site in
    the `__main__` block below; locals reconstructed from their reads.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # Judge each cell against its (up to) 3x3 neighbourhood.
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to cell *pt* given its neighbourhood.

    NOTE(review): the original signature had two identically-named parameters
    (a SyntaxError); the order (cell first, neighbourhood second) is grounded
    by the `__judge_point(...)` call site in `run` above.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ ,lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(['w', 'k'])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 669 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Names grounded by their reads in the functions below
# (`logger.info`, `MAPPING.items()`, `TOP_LEVEL_KEYS`).
logger = logging.get_logger(__name__)

# fairseq parameter-name prefix -> HF UniSpeech parameter-name prefix.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}

# Keys that live at the top level of the HF model rather than under a prefix.
TOP_LEVEL_KEYS = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Copy one fairseq tensor ``value`` into the HF model.

    Walks ``key`` (a dotted attribute path) down from ``hf_pointer``, checks the
    shapes agree, then writes ``value`` into the matching ``.data`` slot selected
    by ``weight_type`` ("weight", "weight_g", "weight_v", "bias" or None).
    ``full_name`` is only used for diagnostics.
    """
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Transfer every tensor from a fairseq UniSpeech checkpoint into ``hf_model``.

    Conv feature-extractor tensors are routed to ``load_conv_layer``; everything
    else is matched against ``MAPPING`` and written via ``set_recursively``.
    Tensors that match nothing are collected and logged as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Non-top-level targets are nested under the `unispeech.` prefix.
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Fill the wildcard with the encoder layer index.
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>.{weight,bias}``:
    type_id 0 addresses the conv itself, type_id 2 the layer norm (only present
    on every layer without group norm, or on layer 0 with group norm). Anything
    else is recorded in ``unused_weights``.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint to the transformers format.

    Builds (or loads) a `UniSpeechConfig`, optionally writes a phoneme vocab +
    processor for fine-tuned models, loads the fairseq checkpoint, copies the
    weights over, and saves the HF model to ``pytorch_dump_folder_path``.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            # Attention mask is only meaningful when layer norm is applied per
            # feature (no zero-mean/unit-variance normalization assumption).
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_60_00,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 320 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Decoding heads supported by MGP-STR: character, BPE and WordPiece."""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
# Tuple of all supported decode types.
# NOTE(review): bound to the obfuscated name `lowercase_` and never read in
# this chunk — presumably the original constant had a descriptive name; verify
# against the upstream module before relying on it.
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """Processor for MGP-STR scene-text recognition.

    Wraps a ViT image processor plus three tokenizers — the character tokenizer
    passed in, a GPT-2 BPE tokenizer and a BERT WordPiece tokenizer — and picks,
    per sample, the decoding head with the highest confidence.

    NOTE(review): reconstructed from the visible call structure after the
    original identifiers were lost to obfuscation; output dict keys follow the
    upstream MGP-STR processor convention — verify against it.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # Back-compat: accept the deprecated `feature_extractor` kwarg.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Preprocess images and/or encode text; returns whichever was supplied
        (image features get the char token ids attached as `labels`)."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, sequences):
        """Decode (char, bpe, wp) logit triples and keep, per sample, the head
        with the highest cumulative-probability confidence."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits; returns (strings, confidences).

        Confidence is the product of per-step max softmax probabilities up to
        (and including) the EOS token.
        """
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'Format {format} is not supported.')

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # Greedy pick per position; drop the leading BOS position.
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Character-tokenizer decode, with intra-token spaces stripped."""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Plain GPT-2 BPE decode."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """WordPiece decode, with intra-token spaces stripped."""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 669 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds a tiny MobileBERT config plus random inputs and exercises every
    task head.

    NOTE(review): reconstructed after obfuscation collapsed all parameters and
    attributes into duplicate names (a SyntaxError); method names are recovered
    from the callers in the companion test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # `parent` is the unittest.TestCase owning this tester; all assertions
        # are routed through it.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random token ids, masks and labels sized by the tester hyper-params."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """A small MobileBertConfig matching the tester hyper-parameters."""
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions (mask+types, types only, ids only).
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each input along a new choices dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """(config, inputs_dict) pair for the shared ModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Wires MobileBERT into the shared transformers model/pipeline test suites.

    NOTE(review): class attribute names reconstructed per the transformers
    test-harness convention after obfuscation erased them — verify against the
    upstream test module.
    """

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileBertModel,
            'fill-mask': MobileBertForMaskedLM,
            'question-answering': MobileBertForQuestionAnswering,
            'text-classification': MobileBertForSequenceClassification,
            'token-classification': MobileBertForTokenClassification,
            'zero-shot': MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original boolean flag's name was obfuscated; `fx_compatible`
    # matches the harness convention — confirm.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the label tensors the pretraining head expects when the common
        tests request labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Wrap a (nested) list of token ids in a long tensor on `torch_device`."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
# Relative tolerance used by the integration test's ratio bounds below; the
# original binding (`a__`) left the `TOLERANCE` references undefined.
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    """Slow integration check against the released google/mobilebert-uncased weights."""

    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
                    [-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
                    [2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Module-level logger; the conversion routine below reports progress through it.
logger = logging.get_logger(__name__)

# Map each slow-tokenizer class name to its fast counterpart, e.g.
# "BertTokenizer" -> transformers.BertTokenizerFast.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase__ ( tokenizer_name , checkpoint_name , dump_path , force_download ):
    """Convert slow tokenizer checkpoints to fast `tokenizer.json` files.

    Args:
        tokenizer_name: name of a tokenizer in TOKENIZER_CLASSES, or None for all.
        checkpoint_name: a specific checkpoint to convert, or None for all
            canonical checkpoints of each tokenizer class.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints even if cached.

    NOTE(review): the original definition gave all four parameters the same
    placeholder name (a SyntaxError) and reused that name for every internal
    reference; names are restored from the f-strings/log messages, which kept
    the real identifiers.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            # legacy_format=False keeps only the fast `tokenizer.json` serialization.
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(f'=> removing {file_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function above is named `UpperCamelCase__`;
    # the original called a nonexistent `convert_slow_checkpoint_to_fast` and
    # bound both the parser and the parsed args to the same throwaway name.
    UpperCamelCase__(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch ops deterministic so the pixel-slice comparisons below are stable.
enable_full_determinism()
class __UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (dummy-model) tests for the Kandinsky 2.2 image-to-image pipeline.

    NOTE(review): the original listed its own, not-yet-defined name as a base
    class (a NameError) and bound every attribute/method to a single reused
    name, so only the last definition survived. The base is restored to the
    imported PipelineTesterMixin and names are restored from the attributes
    the bodies themselves reference — confirm against upstream diffusers.
    """

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        # Width of the dummy image/text embedding vectors.
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # First UNet block width; the config below doubles it for the second block.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny, deterministically initialized UNet for fast CPU tests."""
        torch.manual_seed(0)

        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        # Config for a tiny VQ decoder ("movq") matching the dummy UNet.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Build the component dict the pipeline constructor expects."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        return {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs (embeddings, init image, generator)."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        # mps does not support device-bound generators.
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        return {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }

    def test_kandinsky_img2img(self):
        """Compare a dummy-model run against a reference output slice."""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests ( unittest.TestCase ):
    """GPU integration test against the released Kandinsky 2.2 checkpoints.

    NOTE(review): renamed from a duplicate class name that shadowed the fast
    test class above; the two methods also shared one name, so tearDown never
    ran and the test was never discovered. `torch.float16` restores the garbled
    `torch.floataa` dtype and `torch_device` comes from the diffusers.utils
    import at the top of the file.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''' ).manual_seed(0)
        # Prior produces the (positive, negative) image embedding pair.
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image)
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the local SentencePiece fixture (no BOS token) used by setUp below.
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the Pegasus slow and fast tokenizers.

    NOTE(review): the original inherited from an undefined name and declared
    every class attribute/method under one reused placeholder (so later
    definitions shadowed earlier ones, and no method started with `test_`).
    Attribute names expected by TokenizerTesterMixin and unittest-discoverable
    test names are restored — confirm against upstream transformers.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(lowercase_)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        # Real pretrained tokenizer used by the "large" tests below.
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(vocab_keys ) , 1103 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        # NOTE(review): raw python ids (no tensors, no special tokens) so the
        # slow and fast tokenizers can be compared elementwise — confirm.
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the BigBird-Pegasus variant (offset=0, [MASK]).

    NOTE(review): renamed from a duplicate `A_` that shadowed the Pegasus test
    class above; attribute/method names and the TokenizerTesterMixin base are
    restored as in that class — confirm against upstream transformers.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): mask_token_sent=None matches the BigBird-Pegasus setup;
        # the original passed an undefined placeholder here — confirm.
        tokenizer = PegasusTokenizer(lowercase_ , offset=0 , mask_token_sent=None , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 669 | 0 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def a ( __snake_case : int ) -> int:
    """Return the factorial of a non-negative integer, memoized via lru_cache.

    Raises:
        ValueError: if the input is negative.
    """
    if __snake_case < 0:
        raise ValueError('''Number should not be negative.''' )
    # NOTE(review): the original read an undefined `num` and recursed through an
    # undefined `factorial`; recurse through this function so inner calls also
    # benefit from the cache.
    return 1 if __snake_case in (0, 1) else __snake_case * a(__snake_case - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 608 |
def UpperCamelCase__ ( density , bulk_modulus ):
    """Speed of sound in a fluid: sqrt(bulk_modulus / density).

    Args:
        density: fluid density, must be > 0.
        bulk_modulus: fluid bulk modulus, must be > 0.

    Raises:
        ValueError: if either argument is not positive.

    NOTE(review): the original gave both parameters the same placeholder name
    (a SyntaxError) while the body read `density`/`bulk_modulus`; the real
    names are restored, in the order the validation checks them.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowerCamelCase ( _snake_case : str = "" ):
    """Scrape the IMDb Top 250 chart and return a {title: rating} dict.

    Args:
        _snake_case: chart URL; defaults to the IMDb Top 250 page when empty.

    NOTE(review): the original body read undefined names for the URL and the
    parsed cells; they are restored from the argument and the soup queries.
    Also note the `from bsa import BeautifulSoup` import at the top of the
    file looks like a typo for `bs4` — confirm.
    """
    url = _snake_case or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url ).text ,"html.parser" )
    title_cells = soup.find_all("td" ,attrs="titleColumn" )
    rating_cells = soup.find_all("td" ,class_="ratingColumn imdbRating" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(title_cells ,rating_cells )
    }
def write_movies ( filename : str = "IMDb_Top_250_Movies.csv" ):
    """Write the scraped IMDb Top 250 movies to a CSV file.

    NOTE(review): renamed from a duplicate `lowerCamelCase` definition that
    shadowed the scraper above; the __main__ guard already calls
    `write_movies()`, and the scraper is invoked by its actual name.
    """
    movies = lowerCamelCase()
    with open(filename ,"w" ,newline="" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["Movie title", "IMDb rating"] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


if __name__ == "__main__":
    write_movies()
| 267 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Count the divisors of a positive integer via its prime factorization.

    For n = p1^a1 * ... * pk^ak the divisor count is (a1+1) * ... * (ak+1).

    NOTE(review): the original bound every local to one placeholder name and
    then read undefined names; locals are restored from those reads.
    """
    n = SCREAMING_SNAKE_CASE__
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining factor is prime with exponent 1, contributing a factor of 2.
        n_divisors *= 2
    return n_divisors
def solution ( ):
    """Project Euler 12: first triangular number with more than 500 divisors.

    NOTE(review): renamed from a duplicate `UpperCamelCase__` definition that
    shadowed the divisor counter above; the __main__ guard already calls
    `solution()`, and the counter is invoked by its actual name.
    """
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i  # i-th triangular number, built incrementally
        if UpperCamelCase__(t_num ) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.