'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=None , ):
A_ : Any = parent
A_ : int = batch_size
A_ : str = image_size
A_ : int = patch_size
A_ : Union[str, Any] = num_channels
A_ : List[Any] = is_training
A_ : List[str] = use_labels
A_ : Optional[int] = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Any = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : str = type_sequence_label_size
A_ : Tuple = initializer_range
A_ : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Union[str, Any] = (image_size // patch_size) ** 2
A_ : Optional[Any] = num_patches + 1
def _a (self ):
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : int = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Tuple = self.get_config()
return config, pixel_values, labels
def _a (self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Dict = ViTMSNModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[Any] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Any = self.type_sequence_label_size
A_ : List[str] = ViTMSNForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , labels=lowercase )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : int = 1
A_ : str = ViTMSNForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : str = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
A_ : str = self.prepare_config_and_inputs()
A_, A_, A_ : Union[str, Any] = config_and_inputs
A_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Tuple = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def _a (self ):
A_ : Optional[int] = ViTMSNModelTester(self )
A_ : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Any = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
A_, A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(lowercase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def _a (self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = ViTMSNModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a ( ):
'''simple docstring'''
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def _a (self ):
torch.manual_seed(2 )
A_ : List[str] = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(lowercase )
A_ : Union[str, Any] = self.default_image_processor
A_ : List[Any] = prepare_img()
A_ : Optional[int] = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : Optional[Any] = model(**lowercase )
# verify the logits
A_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : List[Any] = torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
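# A minimal, self-contained sketch of the inference path the slow integration
# test above exercises. The checkpoint name and image path are taken from the
# test itself; the classification head weights are whatever the hub checkpoint
# provides, so treat the predicted label as illustrative.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as the test asserts
print(model.config.id2label[int(logits.argmax(-1))])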
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .processing_wav2vec2 import Wav2Vec2Processor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForPreTraining,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wav2vec2 import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWav2Vec2ForCTC,
TFWav2Vec2ForSequenceClassification,
TFWav2Vec2Model,
TFWav2Vec2PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2Model,
FlaxWav2Vec2PreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
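# A short illustration of what the _LazyModule indirection above provides:
# importing the package is cheap, and each submodule listed in _import_structure
# is only loaded on first attribute access (torch is needed for the model class).
import importlib

wav2vec2 = importlib.import_module("transformers.models.wav2vec2")
config = wav2vec2.Wav2Vec2Config()  # materializes configuration_wav2vec2 lazily
print(type(config).__name__, config.hidden_size)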
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=50 , lowercase=0.02 , lowercase=True , lowercase=None , ):
A_ : Union[str, Any] = parent
A_ : Union[str, Any] = batch_size
A_ : Tuple = seq_length
A_ : Dict = is_training
A_ : str = use_input_mask
A_ : Optional[Any] = vocab_size
A_ : Optional[int] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : Optional[Any] = initializer_range
A_ : str = use_labels
A_ : Tuple = scope
def _a (self ):
A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : str = None
if self.use_input_mask:
A_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : List[str] = self.get_config()
return config, input_ids, input_mask, token_labels
def _a (self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowercase , initializer_range=self.initializer_range , )
def _a (self ):
A_, A_, A_, A_ = self.prepare_config_and_inputs()
A_ : str = True
A_ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Optional[int] = BertGenerationEncoder(config=lowercase )
model.to(lowercase )
model.eval()
A_ : int = model(lowercase , attention_mask=lowercase )
A_ : int = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Dict = True
A_ : Any = BertGenerationEncoder(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Tuple = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , )
A_ : int = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Union[str, Any] = True
A_ : Optional[Any] = True
A_ : Optional[Any] = BertGenerationDecoder(config=lowercase ).to(lowercase ).eval()
# first forward pass
A_ : List[Any] = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , use_cache=lowercase , )
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : str = torch.cat([input_mask, next_mask] , dim=-1 )
A_ : Union[str, Any] = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , output_hidden_states=lowercase , )["""hidden_states"""][0]
A_ : Optional[Any] = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , past_key_values=lowercase , output_hidden_states=lowercase , )["""hidden_states"""][0]
# select random slice
A_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
def _a (self , lowercase , lowercase , lowercase , lowercase , *lowercase , ):
A_ : Optional[Any] = BertGenerationDecoder(lowercase )
model.to(lowercase )
model.eval()
A_ : Union[str, Any] = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self ):
A_, A_, A_, A_ : Dict = self.prepare_config_and_inputs()
A_ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _a (self ):
A_ : Optional[int] = BertGenerationEncoderTester(self )
A_ : Union[str, Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_, A_, A_, A_ : List[str] = self.model_tester.prepare_config_and_inputs()
A_ : Optional[Any] = """bert"""
self.model_tester.create_and_check_model(lowercase , lowercase , lowercase , lowercase )
def _a (self ):
A_ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase )
def _a (self ):
# This regression test was failing with PyTorch < 1.3
A_, A_, A_, A_, A_, A_ = self.model_tester.prepare_config_and_inputs_for_decoder()
A_ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowercase )
@slow
def _a (self ):
A_ : int = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowercase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : Dict = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A_ : str = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A_ : Any = model(lowercase )[0]
A_ : Tuple = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowercase )
A_ : Optional[Any] = torch.tensor(
[[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : Tuple = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A_ : Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A_ : Union[str, Any] = model(lowercase )[0]
A_ : List[Any] = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowercase )
A_ : Union[str, Any] = torch.tensor(
[[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
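# A hedged, self-contained sketch of the encoder/decoder pairing the slow tests
# above cover: the same checkpoint is loaded once as an encoder and once as a
# causal decoder. The checkpoint name and example input ids come straight from
# the tests; network access is assumed.
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder

ckpt = "google/bert_for_seq_generation_L-24_bbc_encoder"
encoder = BertGenerationEncoder.from_pretrained(ckpt)
decoder = BertGenerationDecoder.from_pretrained(ckpt)
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
    hidden = encoder(input_ids).last_hidden_state  # (1, 8, 1024) per the test
    logits = decoder(input_ids).logits  # (1, 8, 50358) per the test
print(hidden.shape, logits.shape)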
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
# create an imagenet -> id dictionary for easier use
self.labels = {}
if id2label is not None:
for key, value in id2label.items():
for label in value.split(""",""" ):
self.labels[label.lstrip().rstrip()] = int(key)
self.labels = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.float32 if is_mps else torch.float64
else:
A_ : List[Any] = torch.int32 if is_mps else torch.int64
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase )
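# A hedged usage sketch for the class-conditional pipeline above, following the
# public diffusers DiTPipeline API. The "facebook/DiT-XL-2-256" checkpoint name
# is an assumption here (it is not referenced in this file), and a CUDA device
# is assumed.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # text labels -> ImageNet ids
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images
images[0].save("dit_sample.png")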
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase :str = logging.get_logger(__name__)
lowerCamelCase :Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
A_ : Union[str, Any] = value
elif weight_type == "weight_g":
A_ : Tuple = value
elif weight_type == "weight_v":
A_ : Optional[Any] = value
elif weight_type == "bias":
A_ : List[Any] = value
else:
A_ : int = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = []
A_ : Dict = fairseq_model.state_dict()
A_ : int = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
A_ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
A_ : Union[str, Any] = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
A_ : Optional[int] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : List[str] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : str = """weight_g"""
elif "weight_v" in name:
A_ : str = """weight_v"""
elif "weight" in name:
A_ : int = """weight"""
elif "bias" in name:
A_ : Dict = """bias"""
else:
A_ : Optional[int] = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = full_name.split("""conv_layers.""" )[-1]
A_ : Dict = name.split(""".""" )
A_ : Any = int(items[0] )
A_ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A_ : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A_ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A_ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A_ : Optional[int] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
'''simple docstring'''
if config_path is not None:
A_ : Optional[Any] = HubertConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Tuple = HubertConfig()
if is_finetuned:
if dict_path:
A_ : Optional[Any] = Dictionary.load(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A_ : List[str] = target_dict.pad_index
A_ : Dict = target_dict.bos_index
A_ : Optional[int] = target_dict.eos_index
A_ : Optional[Any] = len(target_dict.symbols )
A_ : Optional[Any] = os.path.join(lowerCamelCase__ , """vocab.json""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , lowerCamelCase__ )
A_ : List[Any] = Wav2Vec2CTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , )
A_ : Tuple = True if config.feat_extract_norm == """layer""" else False
A_ : Any = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
A_ : List[str] = Wav2Vec2Processor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
A_ : Optional[Any] = HubertForCTC(lowerCamelCase__ )
else:
A_ : Any = HubertModel(lowerCamelCase__ )
if is_finetuned:
A_, A_, A_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
A_, A_, A_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A_ : Tuple = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase :Optional[Any] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
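# A hedged sketch of consuming the converter's output: load the dumped folder
# back as a HubertModel and run one second of random audio through it. The dump
# path is a placeholder; the feature-extractor settings mirror the script above.
import torch
from transformers import HubertModel, Wav2Vec2FeatureExtractor

model = HubertModel.from_pretrained("/path/to/pytorch_dump_folder")
extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0)
inputs = extractor(torch.randn(16000).numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state
print(hidden_states.shape)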
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
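# A quick Monte-Carlo cross-check of the closed-form expectation computed above
# (7 colours, 10 balls per colour, 20 balls drawn); illustrative only, and the
# average should hover around the value printed by solution(20).
import random

def monte_carlo(trials: int = 100_000, taken: int = 20) -> float:
    urn = [colour for colour in range(7) for _ in range(10)]
    hits = sum(len(set(random.sample(urn, taken))) for _ in range(trials))
    return hits / trials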
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = DebertaTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : int = DebertaTokenizerFast
def _a (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
A_ : str = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A_ : Tuple = {"""unk_token""": """[UNK]"""}
A_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
def _a (self , **lowercase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def _a (self , lowercase ):
A_ : List[str] = """lower newer"""
A_ : Any = """lower newer"""
return input_text, output_text
def _a (self ):
A_ : List[Any] = self.get_tokenizer()
A_ : List[str] = """lower newer"""
A_ : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A_ : Optional[Any] = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
A_ : Optional[int] = tokens + [tokenizer.unk_token]
A_ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = tokenizer("""Hello""" , """World""" )
A_ : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , lowercase )
@slow
def _a (self ):
A_ : Optional[Any] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
A_ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowercase )
A_ : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowercase , add_prefix_space=lowercase )
A_ : List[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowercase , add_prefix_space=lowercase )
A_ : Any = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _a (self ):
A_ : int = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
A_ : List[Any] = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
A_ : List[Any] = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
A_ : Optional[int] = tokenizer(lowercase , padding=lowercase )
A_ : str = [tokenizer.decode(lowercase , skip_special_tokens=lowercase ) for seq in encoding["""input_ids"""]]
# fmt: off
A_ : Optional[int] = {
"""input_ids""": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
A_ : Dict = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , lowercase )
for expected, decoded in zip(lowercase , lowercase ):
self.assertEqual(lowercase , lowercase )
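# A minimal sketch of the round-trip behaviour the slow tests above assert,
# using the real microsoft/deberta-base checkpoint (network access assumed).
from transformers import DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
encoded = tokenizer("sequence builders", "multi-sequence build")
print(encoded["input_ids"])
print(encoded["token_type_ids"])
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))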
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output
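# A hedged sketch of composing the three configuration classes above; the
# classmethod is exposed as from_text_vision_configs in the public transformers
# API, and the tiny layer counts are illustrative values only.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_ff=128)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.num_layers, config.vision_config.num_attention_heads)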
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , *lowercase , **lowercase ):
super().__init__(*lowercase , **lowercase )
A_ : Optional[int] = {}
def _a (self , lowercase , *lowercase , **lowercase ):
A_ : Tuple = super().add_tokens(lowercase , *lowercase , **lowercase )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
""" `placeholder_token` that is not already in the tokenizer.""" )
def _a (self , lowercase , *lowercase , lowercase=1 , **lowercase ):
A_ : Optional[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
else:
A_ : Union[str, Any] = []
for i in range(lowercase ):
A_ : Tuple = placeholder_token + F'_{i}'
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
F' {placeholder_token}. Keep placeholder tokens independent.' )
A_ : List[str] = output
def _a (self , lowercase , lowercase=False , lowercase=1.0 ):
if isinstance(lowercase , lowercase ):
A_ : str = []
for i in range(len(lowercase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A_ : Tuple = self.token_map[placeholder_token]
A_ : Optional[Any] = tokens[: 1 + int(len(lowercase ) * prop_tokens_to_load )]
if vector_shuffle:
A_ : List[str] = copy.copy(lowercase )
random.shuffle(lowercase )
A_ : Dict = text.replace(lowercase , """ """.join(lowercase ) )
return text
def __call__(self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
def _a (self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ):
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
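# A hedged usage sketch for the multi-vector tokenizer above, written against
# the names used in the diffusers textual-inversion example it mirrors. The
# class name MultiTokenCLIPTokenizer, the method name add_placeholder_tokens,
# and the CLIP checkpoint are assumptions relative to this file.
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")  # assumed name
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)  # registers <cat-toy>_0 .. <cat-toy>_3
input_ids = tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True)
print(tokenizer.convert_ids_to_tokens(input_ids))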
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
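# A hedged sketch pairing the feature extractor and model exported above. The
# AudioSet checkpoint name is an assumption (it is not referenced in this file),
# and torchaudio must be installed for the extractor, per is_speech_available.
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

extractor = ASTFeatureExtractor()
model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
waveform = torch.randn(16000).numpy()  # one second of fake 16 kHz audio
inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])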
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
lowerCamelCase :Optional[Any] = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
lowerCamelCase :Union[str, Any] = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
lowerCamelCase :Union[str, Any] = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def _a (self , lowercase , lowercase , lowercase=None , lowercase=1 , lowercase="binary" , lowercase=None , lowercase="warn" , ):
A_ : Any = recall_score(
lowercase , lowercase , labels=lowercase , pos_label=lowercase , average=lowercase , sample_weight=lowercase , zero_division=lowercase , )
return {"recall": float(lowercase ) if score.size == 1 else score} | 667 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 667 | 1 |
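# A minimal usage sketch for the processor above, assuming the Hugging Face
# `transformers` LayoutLMv3 API; "document.png" is a hypothetical local file.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")  # apply_ocr=True by default
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
# Passing `boxes` here would trigger the apply_ocr ValueError shown in __call__ above.
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values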
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = (IPNDMScheduler,)
__SCREAMING_SNAKE_CASE : Optional[int] = (('num_inference_steps', 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
A_ : List[str] = dict(self.forward_default_kwargs )
A_ : Optional[int] = kwargs.pop("""num_inference_steps""" , lowercase )
A_ : List[Any] = self.dummy_sample
A_ : Union[str, Any] = 0.1 * sample
A_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ : List[str] = self.get_scheduler_config(**lowercase )
A_ : Optional[int] = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
A_ : str = dummy_past_residuals[:]
if time_step is None:
A_ : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
A_ : Any = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
A_ : Union[str, Any] = dummy_past_residuals[:]
A_ : str = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Optional[int] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Optional[Any] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a (self ):
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
A_ : Tuple = dict(self.forward_default_kwargs )
A_ : List[Any] = kwargs.pop("""num_inference_steps""" , lowercase )
A_ : Dict = self.dummy_sample
A_ : Optional[Any] = 0.1 * sample
A_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
A_ : Dict = dummy_past_residuals[:]
if time_step is None:
A_ : Optional[int] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
A_ : List[Any] = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
A_ : Union[str, Any] = dummy_past_residuals[:]
A_ : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Union[str, Any] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : str = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
A_ : int = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config(**lowercase )
A_ : List[Any] = scheduler_class(**lowercase )
A_ : Any = 10
A_ : Any = self.dummy_model()
A_ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
A_ : int = model(lowercase , lowercase )
A_ : int = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ : Dict = model(lowercase , lowercase )
A_ : List[Any] = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
return sample
def _a (self ):
A_ : Optional[Any] = dict(self.forward_default_kwargs )
A_ : Optional[int] = kwargs.pop("""num_inference_steps""" , lowercase )
for scheduler_class in self.scheduler_classes:
A_ : int = self.get_scheduler_config()
A_ : Any = scheduler_class(**lowercase )
A_ : List[str] = self.dummy_sample
A_ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , """set_timesteps""" ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , """set_timesteps""" ):
A_ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ : Optional[Any] = dummy_past_residuals[:]
A_ : Optional[int] = scheduler.timesteps[5]
A_ : Optional[Any] = scheduler.timesteps[6]
A_ : int = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Dict = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ : Any = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Optional[int] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a (self ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase , time_step=lowercase )
def _a (self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase , time_step=lowercase )
def _a (self ):
A_ : Optional[Any] = self.full_loop()
A_ : Any = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 2540529 ) < 10 | 667 |
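# A minimal denoising-loop sketch for the scheduler under test above (hedged:
# the random tensors stand in for a real diffusion model's outputs).
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.randn_like(sample)  # a real pipeline would call the UNet here
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])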
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers=0, crop_overlap_ratio=512 / 1500, points_per_crop=32, crop_n_points_downscale_factor=1):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :int = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'xlm-roberta'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCAmelCase ( __UpperCAmelCase ):
@property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 667 |
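# A minimal instantiation sketch for the configuration class above (hedged: uses the
# public transformers name XLMRobertaConfig, which this file defines).
from transformers import XLMRobertaConfig

config = XLMRobertaConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12)
print(config.model_type)  # "xlm-roberta"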
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func, y0, x0, step_size, x_end):
    """Modified Euler (Heun's) method: a forward-Euler predictor followed by a trapezoidal corrector."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
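# A worked example for the integrator above (hedged: the restored name `euler_modified`
# and the parameter order (ode_func, y0, x0, step_size, x_end) are inferred from the body).
# For y' = y with y(0) = 1, the value at x = 1 should approach e ≈ 2.71828.
approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
print(approx[-1])  # ~2.7183; the error shrinks as O(step_size^2) since Heun's method is second order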
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
A_ : int = gen_kwargs.copy()
A_ : Any = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A_ : List[Any] = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A_ : int = gen_kwargs
A_ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
A_ : Optional[Any] = self.get_eval_dataloader(lowercase )
A_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A_ : Dict = self.compute_metrics
A_ : Tuple = None
A_ : Dict = time.time()
A_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A_ : Tuple = eval_loop(
lowercase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
A_ : List[Any] = compute_metrics
A_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A_ : str = self.post_process_function(lowercase , lowercase , lowercase )
A_ : Tuple = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
A_ : Any = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
A_ : List[str] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A_ : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
A_ : Tuple = gen_kwargs.copy()
A_ : str = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
A_ : Optional[int] = self.compute_metrics
A_ : int = None
A_ : List[Any] = time.time()
A_ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A_ : Tuple = eval_loop(
lowercase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
A_ : List[str] = compute_metrics
A_ : Dict = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A_ : int = self.post_process_function(lowercase , lowercase , lowercase , """predict""" )
A_ : Optional[int] = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
A_ : List[Any] = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase ) | 667 |
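# A small self-contained demo of the `speed_metrics` helper both overrides above rely
# on (hedged: the sleep stands in for an actual evaluation loop).
import time
from transformers.trainer_utils import speed_metrics

start = time.time()
time.sleep(0.1)  # stand-in for eval_loop(...)
print(speed_metrics("eval", start, num_samples=8, num_steps=4))
# -> {"eval_runtime": ..., "eval_samples_per_second": ..., "eval_steps_per_second": ...}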
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a MobileNetVaConfig from a checkpoint name such as "mobilenet_v1_1.0_224"."""
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Load the COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TensorFlow MobileNetV1 checkpoint to the Hugging Face format and verify its outputs."""
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
    help='''Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 1 |
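# A tiny demo of the model-name parsing done in get_mobilenet_va_config above
# (hedged: standalone, so the regex is repeated here verbatim).
import re

matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
print(float(matches[1]), int(matches[2]))  # depth multiplier 0.75, input size 192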
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy a weight (and optional bias) into a torch layer, with shape checks."""
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load LSH self-attention weights (query_key, value, output dense) into a Reformer layer."""
A_ : Dict = np.asarray(weights[0] )
A_ : int = np.asarray(weights[1] )
A_ : Optional[int] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load local self-attention weights (query, key, value, output dense) into a Reformer layer."""
A_ : List[str] = np.asarray(weights[0] )
A_ : List[Any] = np.asarray(weights[1] )
A_ : str = np.asarray(weights[2] )
A_ : Optional[Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax block (attention layer norm, attention, feed forward) into a torch Reformer block."""
A_ : List[Any] = weights[0][0][0]
A_ : Dict = np.asarray(layer_norm_a[0] )
A_ : int = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# lsh weights + output
A_ : Any = weights[0][1]
if len(lowerCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
else:
set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
# intermediate weighs
A_ : List[str] = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase__ ) == 4:
A_ : Optional[Any] = intermediate_weights[2]
# layernorm 2
A_ : List[str] = np.asarray(intermediate_weights[0][0] )
A_ : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# intermediate dense
A_ : Union[str, Any] = np.asarray(intermediate_weights[1][0] )
A_ : Dict = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
# intermediate out
A_ : List[Any] = np.asarray(intermediate_weights[4][0] )
A_ : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load all trax weights (embeddings, blocks, final layer norm, LM head) into the torch model."""
A_ : Optional[int] = torch_model.reformer
# word embeds
A_ : int = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
if isinstance(weights[3] , lowerCamelCase__ ):
A_ : Any = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
A_ : List[Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'{position_embeddings[emb_idx]} emb does not match'
A_ : int = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
A_ : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
A_ : Union[str, Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# output layer norm
A_ : List[str] = np.asarray(weights[7][0] )
A_ : Any = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
# output embeddings
A_ : Optional[Any] = np.asarray(weights[9][0] )
A_ : Any = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint to a PyTorch state-dict file."""
A_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
A_ : Any = ReformerModelWithLMHead(lowerCamelCase__ )
with open(lowerCamelCase__ , """rb""" ) as f:
A_ : Union[str, Any] = pickle.load(lowerCamelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase :Optional[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 667 |
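# A tiny illustrative check of the set_param helper restored above (hedged: assumes
# set_param and the torch imports from that script are in scope; shapes are arbitrary).
import torch
from torch import nn

layer = nn.Linear(4, 4)
set_param(layer, torch.zeros(4, 4), torch.ones(4))
print(layer.weight.sum().item(), layer.bias.sum().item())  # -> 0.0 4.0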
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
                F'`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
    def _load_voice_preset(self, voice_preset=None, **kwargs):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
                F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 | 1 |
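# A minimal usage sketch for the processor above (hedged: assumes the public Bark
# checkpoint and voice preset names below exist on the Hub).
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs.keys())  # input_ids, attention_mask, plus history_prompt when a preset is given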
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE : List[str] = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
def _a (self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def _a (self ):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def _a (self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase )
def _a (self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def _a (self ):
A_ : List[Any] = self.scheduler_classes[0]
A_ : str = self.get_scheduler_config()
A_ : Dict = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
A_ : Dict = torch.manual_seed(0 )
A_ : Optional[int] = self.dummy_model()
A_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ : Union[str, Any] = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
A_ : Union[str, Any] = scheduler.scale_model_input(lowercase , lowercase )
A_ : str = model(lowercase , lowercase )
A_ : List[str] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : Union[str, Any] = output.prev_sample
A_ : List[Any] = torch.sum(torch.abs(lowercase ) )
A_ : Union[str, Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def _a (self ):
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
A_ : Union[str, Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps )
A_ : List[str] = torch.manual_seed(0 )
A_ : Optional[int] = self.dummy_model()
A_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ : Optional[int] = sample.to(lowercase )
for i, t in enumerate(scheduler.timesteps ):
A_ : Dict = scheduler.scale_model_input(lowercase , lowercase )
A_ : Dict = model(lowercase , lowercase )
A_ : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : Union[str, Any] = output.prev_sample
A_ : List[Any] = torch.sum(torch.abs(lowercase ) )
A_ : Tuple = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 0.00_02 ) < 1E-2
assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
def _a (self ):
A_ : List[Any] = self.scheduler_classes[0]
A_ : str = self.get_scheduler_config()
A_ : Union[str, Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
A_ : List[Any] = torch.manual_seed(0 )
A_ : List[str] = self.dummy_model()
A_ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ : Union[str, Any] = sample.to(lowercase )
for t in scheduler.timesteps:
A_ : List[Any] = scheduler.scale_model_input(lowercase , lowercase )
A_ : Union[str, Any] = model(lowercase , lowercase )
A_ : List[str] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : List[str] = output.prev_sample
A_ : List[str] = torch.sum(torch.abs(lowercase ) )
A_ : int = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def _a (self ):
A_ : str = self.scheduler_classes[0]
A_ : Optional[int] = self.get_scheduler_config()
A_ : Any = scheduler_class(**lowercase , use_karras_sigmas=lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase )
A_ : List[Any] = torch.manual_seed(0 )
A_ : Dict = self.dummy_model()
A_ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ : Optional[Any] = sample.to(lowercase )
for t in scheduler.timesteps:
A_ : List[str] = scheduler.scale_model_input(lowercase , lowercase )
A_ : str = model(lowercase , lowercase )
A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase )
A_ : Optional[Any] = output.prev_sample
A_ : List[str] = torch.sum(torch.abs(lowercase ) )
A_ : str = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3 | 667 |
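# A minimal denoising-loop sketch for EulerDiscreteScheduler mirroring the tests above
# (hedged: random tensors stand in for a real UNet's predictions).
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(sample)  # a real pipeline would run the UNet on model_input
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.shape)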
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 1 |
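# A minimal usage sketch for the processor under test above (hedged: assumes the
# public InstructBLIP checkpoint below; the image is a blank placeholder).
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="What is in this image?", return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values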
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map an original GroupViT state-dict key to its Hugging Face equivalent."""
if "img_encoder.pos_embed" in name:
A_ : Optional[int] = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
A_ : List[str] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
A_ : int = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
A_ : List[Any] = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
A_ : Any = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
A_ : int = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
A_ : Optional[int] = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
A_ : int = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
A_ : Tuple = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
A_ : Union[str, Any] = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
A_ : Optional[int] = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
A_ : Dict = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
A_ : Tuple = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
A_ : List[str] = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
A_ : str = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
A_ : List[str] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
A_ : Optional[int] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
A_ : List[Any] = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
A_ : Tuple = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
A_ : str = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
A_ : Optional[int] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
A_ : Any = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
A_ : int = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
A_ : Union[str, Any] = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split fused qkv / in_proj matrices into separate q, k, v tensors."""
for key in orig_state_dict.copy().keys():
A_ : List[str] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A_ : Any = key.split(""".""" )
A_, A_ : str = int(key_split[2] ), int(key_split[4] )
A_ : int = config.vision_config.hidden_size
if "weight" in key:
A_ : Any = val[:dim, :]
A_ : Tuple = val[dim : dim * 2, :]
A_ : List[str] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : str = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A_ : Union[str, Any] = key.split(""".""" )
A_ : Any = int(key_split[3] )
A_ : Tuple = config.text_config.hidden_size
if "weight" in key:
A_ : Tuple = val[:dim, :]
A_ : str = val[
dim : dim * 2, :
]
A_ : Tuple = val[-dim:, :]
else:
A_ : str = val[:dim]
A_ : Union[str, Any] = val[dim : dim * 2]
A_ : Tuple = val[-dim:]
else:
A_ : Dict = rename_key(lowerCamelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
A_ : Tuple = val.squeeze_()
else:
A_ : int = val
return orig_state_dict
def prepare_img():
    """Load the COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="groupvit-gcc-yfcc" , lowerCamelCase__=False ):
'''simple docstring'''
A_ : str = GroupViTConfig()
A_ : int = GroupViTModel(lowerCamelCase__ ).eval()
A_ : List[str] = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""]
A_ : str = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
A_, A_ : str = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase__ ) == 0)
# verify result
A_ : Union[str, Any] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
A_ : Any = prepare_img()
A_ : Dict = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="""pt""" )
with torch.no_grad():
A_ : int = model(**lowerCamelCase__ )
if model_name == "groupvit-gcc-yfcc":
A_ : List[Any] = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
A_ : Optional[int] = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCamelCase__ , atol=1E-3 )
processor.save_pretrained(lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
print("""Successfully saved processor and model to""" , lowerCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
lowerCamelCase :List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
    default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 667 |
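# A tiny demo of the rename_key mapping defined above (hedged: assumes rename_key from
# the script above is in scope; qkv keys never reach it, as convert_state_dict splits them).
print(rename_key("img_encoder.patch_embed.proj.weight"))
# -> "vision_model.embeddings.patch_embeddings.projection.weight"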
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 667 | 1 |
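# A minimal instantiation sketch for the configuration above (hedged: uses the public
# transformers name MgpstrConfig, available in recent versions, with model_type "mgp-str").
from transformers import MgpstrConfig

config = MgpstrConfig(image_size=[32, 128], patch_size=4, max_token_length=27)
print(config.model_type)  # "mgp-str"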
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=0 , ):
A_ : List[Any] = parent
A_ : Union[str, Any] = batch_size
A_ : Tuple = seq_length
A_ : Optional[Any] = is_training
A_ : List[str] = use_input_mask
A_ : List[Any] = use_token_type_ids
A_ : List[str] = use_labels
A_ : Optional[int] = vocab_size
A_ : Any = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : Optional[int] = type_vocab_size
A_ : int = type_sequence_label_size
A_ : List[Any] = initializer_range
A_ : Optional[Any] = num_labels
A_ : Union[str, Any] = num_choices
A_ : Optional[int] = scope
A_ : Dict = projection_dim
def _a (self ):
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : int = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
A_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Tuple = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Any = None
A_ : List[Any] = None
A_ : List[str] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : str = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
A_ : Dict = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Any = TFDPRContextEncoder(config=lowercase )
A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
A_ : Union[str, Any] = model(lowercase , token_type_ids=lowercase )
A_ : Dict = model(lowercase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : str = TFDPRQuestionEncoder(config=lowercase )
A_ : Union[str, Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
A_ : Union[str, Any] = model(lowercase , token_type_ids=lowercase )
A_ : Optional[int] = model(lowercase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : str = TFDPRReader(config=lowercase )
A_ : Tuple = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _a (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids}
        return config, inputs_dict
@require_tf
class _lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE : Optional[int] = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Dict = False
def _a (self ):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowercase )
def _a (self ):
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowercase )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowercase )
@slow
def _a (self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""")
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1E-4)) | 667 |
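Outside the test harness, the same embedding check can be reproduced with the paired tokenizer instead of hand-written token ids. A sketch, assuming network access to the facebook/dpr-question_encoder-single-nq-base checkpoint used above:
from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
embedding = model(**inputs).pooler_output  # shape (1, 768), same tensor the slice test inspects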
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    '''simple docstring'''
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5)) | 667 | 1 |
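One worked iteration makes the secant update concrete. With f(x) = x**3 - 2x - 5 and the starting points above, f(3) = 16 and f(3.5) = 30.875, so the first update is x2 = 3.5 - 30.875 / ((30.875 - 16) / 0.5) ≈ 2.462; further iterations converge to the real root near 2.0945515:
root = intersection(f, 3, 3.5)
assert abs(f(root)) < 1e-3  # converged: |f| is tiny near the root
print(root)  # ~2.0945515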
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
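A quick sanity check against the sample graphs: the cheapest E -> F path is E -> G -> F with cost 2 + 1 = 3, beating E -> B -> C -> D -> F at cost 4, and an unreachable pair returns the -1 sentinel:
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3
print(bidirectional_dij("F", "B", graph_fwd, graph_bwd))  # -1, "F" has no outgoing edges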
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( BaseImageProcessor ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 | 1 |
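The class above mirrors transformers' CLIPImageProcessor (same defaults: 224px shortest edge, center crop, CLIP mean/std normalisation). A usage sketch with the real transformers class, since the method names in this dump are collapsed to `_a`:
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)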
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 |
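In the QDQBERT research example these helpers are chained in a fixed order. The function names below (add_arguments, set_default_quantizers, configure_model) are assumptions based on that project, since this dump collapses every definition to `a`:
import argparse

parser = argparse.ArgumentParser()
add_arguments(parser)  # assumed name: registers --wprec, --aprec, --calibrator, ...
args = parser.parse_args(["--quant-per-tensor", "--calibrator", "max"])
set_default_quantizers(args)              # assumed name: installs the QuantDescriptors
configure_model(model, args, calib=True)  # assumed name; `model` is a quantized BERT, assumed in scope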
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    '''simple docstring'''
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    '''simple docstring'''
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
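A worked run of the greedy selector: sorting by raw value puts Pizza first, and only Pizza fits a weight budget of 60, so the payoff is 100.0:
food = ["Burger", "Pizza", "Coca Cola"]
value = [80, 100, 60]
weight = [40, 60, 40]
menu = build_menu(food, value, weight)
items, total_value = greedy(menu, 60, Things.get_value)
print(items, total_value)  # [Things(Pizza, 100, 60)] 100.0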
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
def _a (self , lowercase , lowercase , lowercase=False ):
A_ : List[Any] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class in get_values(lowercase ):
A_ : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class TFMobileBertModelTester:
def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
A_ : Dict = parent
A_ : Any = batch_size
A_ : Dict = seq_length
A_ : Union[str, Any] = is_training
A_ : str = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : int = vocab_size
A_ : List[Any] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : Optional[Any] = initializer_range
A_ : List[str] = num_labels
A_ : List[Any] = num_choices
A_ : int = scope
A_ : str = embedding_size
def _a (self ):
A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Optional[int] = None
if self.use_input_mask:
A_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Optional[Any] = None
if self.use_token_type_ids:
A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : List[str] = None
A_ : Tuple = None
A_ : Optional[int] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
A_ : List[str] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Union[str, Any] = TFMobileBertModel(config=lowercase )
A_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : List[str] = model(lowercase )
A_ : Tuple = [input_ids, input_mask]
A_ : Optional[Any] = model(lowercase )
A_ : List[str] = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Tuple = TFMobileBertForMaskedLM(config=lowercase )
A_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Tuple = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : List[Any] = TFMobileBertForNextSentencePrediction(config=lowercase )
A_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : List[Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Dict = TFMobileBertForPreTraining(config=lowercase )
A_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Any = model(lowercase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Optional[Any] = self.num_labels
A_ : Any = TFMobileBertForSequenceClassification(config=lowercase )
A_ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Optional[Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Tuple = self.num_choices
A_ : int = TFMobileBertForMultipleChoice(config=lowercase )
A_ : List[str] = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
A_ : Dict = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
A_ : List[str] = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
A_ : Any = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A_ : Any = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Tuple = self.num_labels
A_ : Optional[int] = TFMobileBertForTokenClassification(config=lowercase )
A_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : int = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Any = TFMobileBertForQuestionAnswering(config=lowercase )
A_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : List[Any] = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
def _a (self ):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase )
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase )
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase )
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase )
def _a (self ):
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase )
@slow
def _a (self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
        model = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4) | 667 |
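The same checkpoint can be exercised end-to-end with real text; a sketch pairing the TF model with its tokenizer (requires downloading google/mobilebert-uncased):
from transformers import AutoTokenizer, TFMobileBertForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
inputs = tokenizer("hello world", return_tensors="tf")
outputs = model(**inputs)
print(outputs.prediction_logits.shape)        # (1, seq_len, 30522)
print(outputs.seq_relationship_logits.shape)  # (1, 2)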
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 1 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    '''simple docstring'''
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["""""".join(row) for row in temp_grid]
    output_string = """""".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    '''simple docstring'''
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("""*""")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = """"""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    '''simple docstring'''
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
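A round trip shows the zigzag at work: with 3 rails, "HELLO WORLD" reads out row by row as "HOREL OLLWD", and brute force recovers the plaintext at key 3:
message = "HELLO WORLD"
cipher = encrypt(message, 3)
print(cipher)  # HOREL OLLWD
assert decrypt(cipher, 3) == message
print(bruteforce(cipher)[3])  # HELLO WORLD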
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # If remote code is not set, the default is to use the local classes registered above
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 1 |
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase__ , lowerCamelCase__ ) and number_of_steps > 0
    ), f'number_of_steps needs to be a positive integer, your input is {number_of_steps}'
if number_of_steps == 1:
return 1
A_, A_ : List[str] = 1, 1
for _ in range(number_of_steps - 1 ):
A_, A_ : Optional[int] = current + previous, current
return current
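# Hand-checked values for the recurrence above (climbing-stairs counting):
# 1 step -> 1 way, 2 -> 2, 3 -> 3, 4 -> 5; the sequence grows like Fibonacci.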
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
        raise ValueError("""partitions can not be greater than number_of_bytes!""" )
A_ : int = number_of_bytes // partitions
A_ : Union[str, Any] = []
for i in range(lowerCamelCase__ ):
A_ : Dict = i * bytes_per_partition + 1
A_ : Tuple = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
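# Worked example (hand-checked against the loop above):
# 16 bytes split into 4 partitions -> ['1-4', '5-8', '9-12', '13-16'].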
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = year % 19
A_ : Optional[Any] = year % 4
A_ : List[Any] = year % 7
A_ : Optional[Any] = math.floor(year / 1_00 )
A_ : int = math.floor((13 + 8 * leap_day_inhibits) / 25 )
A_ : Union[str, Any] = leap_day_inhibits / 4
A_ : Optional[int] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
A_ : Optional[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
A_ : str = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
A_ : List[str] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(lowerCamelCase__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(lowerCamelCase__ , 4 , 18 )
else:
return datetime(lowerCamelCase__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
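# Sanity check against known calendar dates: this computation yields
# 2021-04-04 and 2023-04-09 for Easter Sunday 2021 and 2023.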
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCamelCase :int = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}") | 667 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
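# For example, the first entry generated for layer 0 maps
# 'encoder.deit.blocks.0.norm1.weight' -> 'encoder.encoder.layer.0.layernorm_before.weight'.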
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
A_ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
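        # The fused qkv weight has shape (3 * hidden_size, hidden_size); the three
        # equal slices above become the query, key and value projection weights.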
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = dct.pop(lowerCamelCase__ )
A_ : Optional[int] = val
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
A_ : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Optional[Any] = False
A_ : Union[str, Any] = """relu"""
A_ : List[str] = 10_24
A_ : Tuple = True
A_ : Tuple = False
A_ : List[str] = False
# load HuggingFace model
A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ : str = val
else:
A_ : List[str] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
A_ : str = ViTImageProcessor(size=encoder_config.image_size )
A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
A_ : Dict = outputs.logits
A_ : str = torch.Size([1, 1, 5_02_65] )
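    # 50265 is the vocabulary size of the RoBERTa tokenizer used by the TrOCR decoder.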
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 1 |
'''simple docstring'''
import os
def a ( ):
'''simple docstring'''
with open(os.path.dirname(lowerCamelCase__ ) + """/grid.txt""" ) as f:
A_ : Optional[Any] = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowerCamelCase__ ) for x in f.readline().split()] )
A_ : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
A_ : List[str] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
A_ : Optional[int] = temp
# down
for i in range(17 ):
for j in range(20 ):
A_ : Dict = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
A_ : Union[str, Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
A_ : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
A_ : str = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
A_ : Tuple = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
A_ : str = temp
return maximum
if __name__ == "__main__":
print(solution()) | 667 |
'''simple docstring'''
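# A quine: %r re-embeds the template string (with its quotes) into itself, while
# %% escapes to a literal %, so the printed text reproduces this line exactly.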
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['keras_nlp']
def __init__(self , *lowercase , **lowercase ):
requires_backends(self , ["""keras_nlp"""] ) | 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # The returned Image can't be compared directly, so assert on its repr instead
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
    # Test that get_neighbors_pixel() does not return None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert lbp_image.any() | 667 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : str = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=lowercase ).to(lowercase )
A_ : str = AutoTokenizer.from_pretrained("""google/mt5-small""" )
A_ : Dict = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
A_ : Union[str, Any] = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
A_ : str = model(input_ids.to(lowercase ) , labels=labels.to(lowercase ) ).loss
A_ : Union[str, Any] = -(labels.shape[-1] * loss.item())
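        # The loss above is the mean negative log-likelihood per token; multiplying by
        # the label length recovers a summed score comparable to the reference below.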
A_ : Union[str, Any] = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 ) | 667 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
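# _PatchedModuleObj mirrors a module's public attributes so individual ones can be
# overridden without mutating the real module; _original_module keeps the unwrapped
# module reachable for the identity checks performed by the patcher below.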
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__(self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=False , lowercase=True , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ):
A_ : Union[str, Any] = parent
A_ : Dict = batch_size
A_ : List[str] = num_channels
A_ : Any = image_size
A_ : str = min_resolution
A_ : Union[str, Any] = max_resolution
A_ : List[str] = do_resize
A_ : Union[str, Any] = size if size is not None else {"""height""": 18, """width""": 20}
A_ : Tuple = do_thumbnail
A_ : Tuple = do_align_axis
A_ : Any = do_pad
A_ : Any = do_normalize
A_ : Dict = image_mean
A_ : Optional[Any] = image_std
def _a (self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = DonutImageProcessor if is_vision_available() else None
def _a (self ):
A_ : Any = DonutImageProcessingTester(self )
@property
def _a (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a (self ):
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
self.assertTrue(hasattr(lowercase , """do_thumbnail""" ) )
self.assertTrue(hasattr(lowercase , """do_align_long_axis""" ) )
self.assertTrue(hasattr(lowercase , """do_pad""" ) )
self.assertTrue(hasattr(lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase , """image_mean""" ) )
self.assertTrue(hasattr(lowercase , """image_std""" ) )
def _a (self ):
A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
A_ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
A_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def _a (self ):
pass
@is_flaky()
def _a (self ):
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A_ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ : Tuple = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def _a (self ):
# Initialize image_processing
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ : Tuple = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def _a (self ):
# Initialize image_processing
A_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , ) | 667 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 1 |
'''simple docstring'''
from math import isqrt
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCamelCase__ , lowerCamelCase__ ):
A_ : Optional[Any] = False
return [i for i in range(2 , lowerCamelCase__ ) if is_prime[i]]
def a ( lowerCamelCase__ = 10**8 ):
'''simple docstring'''
A_ : Tuple = calculate_prime_numbers(max_number // 2 )
A_ : Union[str, Any] = 0
A_ : List[str] = 0
A_ : Dict = len(lowerCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
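# Hand-checked sketch: with max_number = 30 the two-pointer scan above counts the
# ten semiprimes 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so it returns 10.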
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
        # create an imagenet label -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
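        # e.g. an id2label entry {'1': 'goldfish, Carassius auratus'} produces the
        # reverse entries {'goldfish': 1, 'Carassius auratus': 1} in the loop above.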
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase :Union[str, Any] = '''CompVis/stable-diffusion-v1-1'''
lowerCamelCase :Tuple = '''CompVis/stable-diffusion-v1-2'''
lowerCamelCase :List[Any] = '''CompVis/stable-diffusion-v1-3'''
lowerCamelCase :Optional[int] = '''CompVis/stable-diffusion-v1-4'''
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ):
        super().__init__()
A_ : List[str] = StableDiffusionPipeline.from_pretrained(lowercase )
A_ : Optional[int] = StableDiffusionPipeline.from_pretrained(lowercase )
A_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowercase )
A_ : str = StableDiffusionPipeline(
vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , requires_safety_checker=lowercase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _a (self ):
return {k: getattr(self , lowercase ) for k in self.config.keys() if not k.startswith("""_""" )}
def _a (self , lowercase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def _a (self ):
self.enable_attention_slicing(lowercase )
@torch.no_grad()
def _a (self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ):
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def _a (self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ):
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def _a (self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ):
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def _a (self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ):
return self.pipea(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
@torch.no_grad()
def _a (self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ):
A_ : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(lowercase )
        # Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
A_ : List[str] = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.2
A_ : Any = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.3
A_ : Union[str, Any] = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.4
A_ : str = self.textaimg_sda_a(
prompt=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , **lowercase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] ) | 667 |
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
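    # Linearity of expectation: each of the 7 colours is absent from the draw with
    # probability C(60, n) / C(70, n) for n balls taken, so the expected number of
    # distinct colours is 7 * (1 - C(60, n) / C(70, n)).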
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 1 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def a ( lowerCamelCase__ ):
'''simple docstring'''
return EnvironmentCommand()
def a ( lowerCamelCase__ ):
'''simple docstring'''
return EnvironmentCommand(args.accelerate_config_file )
class _lowerCAmelCase ( __UpperCAmelCase ):
@staticmethod
def _a (lowercase ):
A_ : Union[str, Any] = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowercase )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowercase , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowercase )
def __init__(self , lowercase , *lowercase ):
A_ : Tuple = accelerate_config_file
def _a (self ):
A_ : Tuple = """not installed"""
if is_safetensors_available():
import safetensors
A_ : List[Any] = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ : Optional[Any] = F'{safetensors.__version__} but is ignored because of PyTorch version too old.'
A_ : List[str] = """not installed"""
A_ : List[str] = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ : Union[str, Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowercase ):
A_ : Optional[int] = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ : List[str] = (
"""\n""".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(lowercase , lowercase )
else F'\t{accelerate_config}'
)
A_ : Dict = """not installed"""
A_ : Optional[int] = """NA"""
if is_torch_available():
import torch
A_ : int = torch.__version__
A_ : Optional[Any] = torch.cuda.is_available()
A_ : Union[str, Any] = """not installed"""
A_ : Any = """NA"""
if is_tf_available():
import tensorflow as tf
A_ : Optional[int] = tf.__version__
try:
# deprecated in v2.1
A_ : Optional[Any] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ : Union[str, Any] = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ : Tuple = """not installed"""
A_ : Tuple = """not installed"""
A_ : Tuple = """not installed"""
A_ : List[Any] = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ : Optional[Any] = flax.__version__
A_ : List[str] = jax.__version__
A_ : int = jaxlib.__version__
A_ : List[Any] = jax.lib.xla_bridge.get_backend().platform
A_ : Optional[int] = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F'{safetensors_version}',
"""Accelerate version""": F'{accelerate_version}',
"""Accelerate config""": F'{accelerate_config_str}',
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""Tensorflow version (GPU?)""": F'{tf_version} ({tf_cuda_available})',
"""Flax version (CPU?/GPU?/TPU?)""": F'{flax_version} ({jax_backend})',
"""Jax version""": F'{jax_version}',
"""JaxLib version""": F'{jaxlib_version}',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowercase ) )
return info
@staticmethod
def _a (lowercase ):
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n" | 667 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig( PretrainedConfig ):
    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(self , vocab_size=50244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
    model_type = 'pix2struct_vision_model'
    def __init__(self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig( PretrainedConfig ):
    model_type = 'pix2struct'
    is_composition = True
    def __init__(self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
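# Editor's note: an illustrative sketch (not part of the original file) showing
# how the three configs above compose. All values are defaults or tiny test
# sizes; nothing is downloaded, so this runs offline within this module.
if __name__ == "__main__":
    text_config = PixaStructTextConfig(num_layers=2 , num_heads=2 )
    vision_config = PixaStructVisionConfig(num_hidden_layers=2 )
    config = PixaStructConfig.from_text_vision_configs(text_config , vision_config )
    # the composite config keeps full sub-config dicts when serialized
    assert config.to_dict()["""text_config"""]["""num_layers"""] == 2
    assert config.decoder_start_token_id == text_config.decoder_start_token_id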
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCamelCase :List[Any] = logging.get_logger(__name__)
enable_full_determinism()
class UnetaDModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = 'sample'
    @property
    def dummy_input(self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self ):
        return (3, 32, 32)
    @property
    def output_shape(self ):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = 'sample'
    @property
    def dummy_input(self ):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self ):
        return (4, 32, 32)
    @property
    def output_shape(self ):
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self ):
        model , loading_info = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def test_from_pretrained_accelerate(self ):
        model , _ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=True )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def test_from_pretrained_accelerate_wont_change_results(self ):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate , _ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=True )
        model_accelerate.to(torch_device )
        model_accelerate.eval()
        noise = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        arr_accelerate = model_accelerate(noise , time_step )["""sample"""]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load , _ = UNetaDModel.from_pretrained(
            """fusing/unet-ldm-dummy-update""" , output_loading_info=True , low_cpu_mem_usage=False )
        model_normal_load.to(torch_device )
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise , time_step )["""sample"""]
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1E-3 )
    def test_output_pretrained(self ):
        model = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
        model.eval()
        model.to(torch_device )
        noise = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-3 ) )
class NCSNppModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = 'sample'
    @property
    def dummy_input(self , sizes=(32, 32) ):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [10] ).to(dtype=torch.int32 , device=torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self ):
        return (3, 32, 32)
    @property
    def output_shape(self ):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """block_out_channels""": [32, 64, 64, 64],
            """in_channels""": 3,
            """layers_per_block""": 1,
            """out_channels""": 3,
            """time_embedding_type""": """fourier""",
            """norm_eps""": 1E-6,
            """mid_block_scale_factor""": math.sqrt(2.0 ),
            """norm_num_groups""": None,
            """down_block_types""": [
                """SkipDownBlock2D""",
                """AttnSkipDownBlock2D""",
                """SkipDownBlock2D""",
                """SkipDownBlock2D""",
            ],
            """up_block_types""": [
                """SkipUpBlock2D""",
                """SkipUpBlock2D""",
                """AttnSkipUpBlock2D""",
                """SkipUpBlock2D""",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self ):
        model , loading_info = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(torch_device )
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256) ).to(torch_device )
        inputs["""sample"""] = noise
        image = model(**inputs )
        assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_mid(self ):
        model = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1E-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
    def test_output_pretrained_ve_large(self ):
        model = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1E-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
    def test_forward_with_norm_groups(self ):
        # not required for this model
        pass
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run(self ):
        raise NotImplementedError()
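# Editor's note: a minimal sketch (not part of the original file) of how this
# abstract base is meant to be subclassed; the command name and message below
# are made up for illustration.
if __name__ == "__main__":
    class HelloCommand(BaseTransformersCLICommand ):
        @staticmethod
        def register_subcommand(parser: ArgumentParser ):
            hello_parser = parser.add_parser("""hello""" )
            hello_parser.set_defaults(func=lambda args: HelloCommand() )
        def run(self ):
            print("""hello from a custom CLI command""" )

    root = ArgumentParser("""demo-cli""" )
    subparsers = root.add_subparsers()
    HelloCommand.register_subcommand(subparsers )
    args = root.parse_args(["""hello"""] )
    args.func(args ).run()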
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMvaProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__(self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["""overflow_to_sample_mapping"""] )
        encoded_inputs["""pixel_values"""] = images
        return encoded_inputs
    def get_overflowing_images(self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor(self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
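# Editor's note: an illustrative usage sketch, not part of the original file.
# It assumes the image processor was built with apply_ocr=False, so words and
# boxes must be supplied by the caller; the public checkpoint name is an
# assumption here.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("""microsoft/layoutlmv3-base""" , apply_ocr=False )
    image = Image.new("""RGB""" , (224, 224) , color="""white""" )
    words = ["""hello""", """world"""]
    boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # normalized 0-1000 coordinates
    encoding = processor(image , words , boxes=boxes , return_tensors="""pt""" )
    print(encoding.keys() )  # input_ids, attention_mask, bbox, pixel_values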
'''simple docstring'''
def bead_sort( sequence ):
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("""Sequence must be list of non-negative integers""" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
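# Editor's note: a short worked example, not part of the original file. Each
# outer pass lets every unit of "bead" height fall at most one rod toward the
# end, so after len(sequence) passes the rods are sorted; the input is made up.
if __name__ == "__main__":
    # one sweep on [4, 1, 3]: (4,1) -> [1, 4, 3]; then (4,3) -> [1, 3, 4]
    print(bead_sort([4, 1, 3]) )  # [1, 3, 4]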
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
    def __init__(self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters(self , **kwargs ):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["""points_per_batch"""] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["""points_per_crop"""] = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["""crops_n_layers"""] = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["""crop_overlap_ratio"""] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["""crop_n_points_downscale_factor"""] = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["""pred_iou_thresh"""] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            forward_params["""stability_score_offset"""] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            forward_params["""mask_threshold"""] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            forward_params["""stability_score_thresh"""] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["""crops_nms_thresh"""] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["""output_rle_mask"""] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["""output_bboxes_mask"""] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess(self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        image = load_image(image )
        target_size = self.image_processor.size["""longest_edge"""]
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors="""pt""" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    model_inputs["""image_embeddings"""] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        input_boxes = model_inputs.pop("""input_boxes""" )
        is_last = model_inputs.pop("""is_last""" )
        original_sizes = model_inputs.pop("""original_sizes""" ).tolist()
        reshaped_input_sizes = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["""pred_masks"""]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs["""iou_scores"""]
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["""rle_mask"""] = rle_mask
        if output_bboxes_mask:
            optional["""bounding_boxes"""] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
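# Editor's note: an illustrative sketch, not part of the original file. It
# drives the chunked preprocess -> _forward -> postprocess flow through the
# public pipeline() factory; the checkpoint and image URL are common public
# examples and are assumptions here.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("""mask-generation""" , model="""facebook/sam-vit-base""" )
    outputs = generator(
        """http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=64 )
    print(len(outputs["""masks"""] ) , outputs["""scores"""][:3] )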
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence ):
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence , current_sequence , index , index_used , ):
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence :list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a :list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func , y0 , x0 , step_size , x_end ):
    '''simple docstring'''
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # predictor: plain Euler step, then corrector: trapezoidal average
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
if __name__ == "__main__":
    import doctest
    doctest.testmod()
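# Editor's note: a small worked example, not part of the original file. For
# y' = y with y(0) = 1 the exact solution is e^x, so the last entry should be
# close to e ~= 2.71828; the tolerance below is an assumption.
if __name__ == "__main__":
    import math

    y = euler_modified(lambda x, y: y , 1.0 , 0.0 , 0.01 , 1.0 )
    assert abs(y[-1] - math.e ) < 1E-2
    print(y[-1] )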
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase :Optional[Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ViTFeatureExtractor''']
lowerCamelCase :Optional[int] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[str] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :str = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config( model_name ):
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ):
    '''simple docstring'''
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_text_encoder + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_unet + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
lowerCamelCase :int = parser.parse_args()
lowerCamelCase :str = args.base_model_path
lowerCamelCase :Dict = args.checkpoint_path
lowerCamelCase :str = args.dump_path
lowerCamelCase :str = args.lora_prefix_unet
lowerCamelCase :Any = args.lora_prefix_text_encoder
lowerCamelCase :Tuple = args.alpha
lowerCamelCase :List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCamelCase :Union[str, Any] = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
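# Editor's note: a self-contained numerical sketch, not part of the original
# script, of the low-rank update the loop above applies to each layer:
# W <- W + alpha * (up @ down). Shapes and alpha are made up for illustration.
if __name__ == "__main__":
    import torch

    torch.manual_seed(0 )
    w = torch.randn(8 , 8 )
    up , down = torch.randn(8 , 2 ) , torch.randn(2 , 8 )  # rank-2 LoRA factors
    alpha = 0.75
    merged = w + alpha * torch.mm(up , down )
    assert torch.linalg.matrix_rank(merged - w ) <= 2  # the update is low-rank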
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']
    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }
    def __init__(self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained(self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub = False , **kwargs , ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , """v2""" ) , exist_ok=True )
            embeddings_dict = {}
            embeddings_dict["""repo_or_path"""] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""] , speaker_embeddings_directory , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , F'{prompt_key}_{key}.npy' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , """w""" ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset(self , voice_preset = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , None ) , cache_dir=kwargs.pop("""cache_dir""" , None ) , force_download=kwargs.pop("""force_download""" , False ) , proxies=kwargs.pop("""proxies""" , None ) , resume_download=kwargs.pop("""resume_download""" , False ) , local_files_only=kwargs.pop("""local_files_only""" , False ) , use_auth_token=kwargs.pop("""use_auth_token""" , None ) , revision=kwargs.pop("""revision""" , None ) , )
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict(self , voice_preset = None ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
    def __call__(self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith(""".npz""" ):
                    voice_preset = voice_preset + """.npz"""
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding="""max_length""" , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text["""history_prompt"""] = voice_preset
        return encoded_text
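# Editor's note: an illustrative usage sketch, not part of the original file;
# the checkpoint and voice preset names are the usual public ones and are
# assumptions here.
if __name__ == "__main__":
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("""suno/bark-small""" )
    inputs = processor("""Hello, my dog is cute""" , voice_preset="""v2/en_speaker_6""" )
    # tokenized text plus the loaded speaker embeddings under "history_prompt"
    print(inputs.keys() )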
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput( BaseOutput ):
    sample: torch.FloatTensor
class Encoder( nn.Module ):
    def __init__(self , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , double_z=True , ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([] )
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=self.layers_per_block , in_channels=input_channel , out_channels=output_channel , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=None , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=None , )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=norm_num_groups , eps=1E-6 )
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1] , conv_out_channels , 3 , padding=1 )
        self.gradient_checkpointing = False
    def forward(self , x ):
        sample = x
        sample = self.conv_in(sample )
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
            # down
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block ) , sample , use_reentrant=False )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , use_reentrant=False )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block ) , sample )
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , sample )
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample )
            # middle
            sample = self.mid_block(sample )
        # post-process
        sample = self.conv_norm_out(sample )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class Decoder( nn.Module ):
    def __init__(self , in_channels=3 , out_channels=3 , up_block_types=("UpDecoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , norm_type="group" , ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        temb_channels = in_channels if norm_type == """spatial""" else None
        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=temb_channels , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=self.layers_per_block + 1 , in_channels=prev_output_channel , out_channels=output_channel , prev_output_channel=None , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=temb_channels , resnet_time_scale_shift=norm_type , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0] , temb_channels )
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=norm_num_groups , eps=1E-6 )
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0] , out_channels , 3 , padding=1 )
        self.gradient_checkpointing = False
    def forward(self , z , latent_embeds=None ):
        sample = z
        sample = self.conv_in(sample )
        upscale_dtype = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds , use_reentrant=False )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block ) , sample , latent_embeds , use_reentrant=False )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) , sample , latent_embeds )
        else:
            # middle
            sample = self.mid_block(sample , latent_embeds )
            sample = sample.to(upscale_dtype )
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample , latent_embeds )
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample )
        else:
            sample = self.conv_norm_out(sample , latent_embeds )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class VectorQuantizer( nn.Module ):
    def __init__(self , n_e , vq_embed_dim , beta , remap=None , unknown_index="random" , sane_index_shape=False , legacy=True ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                F'Remapping {self.n_e} indices to {self.re_embed} indices. '
                F'Using {self.unknown_index} for unknown indices.' )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used(self , inds ):
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1 )
        unknown = match.sum(2 ) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape )
    def unmap_to_all(self , inds ):
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , inds )
        return back.reshape(ishape )
    def forward(self , z ):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0 , 2 , 3 , 1 ).contiguous()
        z_flattened = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened , self.embedding.weight ) , dim=1 )
        z_q = self.embedding(min_encoding_indices ).view(z.shape )
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices )
            min_encoding_indices = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self , indices , shape ):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0] , -1 )  # add batch axis
            indices = self.unmap_to_all(indices )
            indices = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices )
        if shape is not None:
            z_q = z_q.view(shape )
            # reshape back to match original input shape
            z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class DiagonalGaussianDistribution( object ):
    def __init__(self , parameters , deterministic=False ):
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def sample(self , generator = None ):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def kl(self , other=None ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def nll(self , sample , dims=[1, 2, 3] ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
    def mode(self ):
        return self.mean
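# Editor's note: a self-contained numerical sketch, not part of the original
# file, of DiagonalGaussianDistribution: a VAE encoder would produce
# `parameters` as mean/logvar stacked on the channel axis; shapes are made up.
if __name__ == "__main__":
    torch.manual_seed(0 )
    parameters = torch.randn(1 , 8 , 4 , 4 )  # 4 mean + 4 logvar channels
    dist = DiagonalGaussianDistribution(parameters )
    latent = dist.sample()  # reparameterized draw, shape (1, 4, 4, 4)
    kl = dist.kl()          # KL divergence against a standard normal
    assert latent.shape == (1, 4, 4, 4) and kl.shape == (1,)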
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
'''simple docstring'''
from __future__ import annotations
import math
def a ( lowerCamelCase__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = str(lowerCamelCase__ )
A_ : List[str] = [n]
for i in range(1 , len(lowerCamelCase__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def a ( lowerCamelCase__ ):
'''simple docstring'''
if len(str(lowerCamelCase__ ) ) > 3:
if not is_prime(int(str(lowerCamelCase__ )[-3:] ) ) or not is_prime(int(str(lowerCamelCase__ )[:3] ) ):
return False
return True
def a ( lowerCamelCase__ = 11 ):
'''simple docstring'''
A_ : list[int] = []
A_ : Optional[int] = 13
while len(lowerCamelCase__ ) != count:
if validate(lowerCamelCase__ ):
A_ : Any = list_truncated_nums(lowerCamelCase__ )
if all(is_prime(lowerCamelCase__ ) for i in list_nums ):
list_truncated_primes.append(lowerCamelCase__ )
num += 2
return list_truncated_primes
def a ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"{sum(compute_truncated_primes(1_1)) = }") | 667 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = image_size
A_ : List[str] = patch_size
A_ : Tuple = num_channels
A_ : List[str] = max_token_length
A_ : int = num_character_labels
A_ : str = num_bpe_labels
A_ : Tuple = num_wordpiece_labels
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = mlp_ratio
A_ : str = distilled
A_ : Union[str, Any] = layer_norm_eps
A_ : str = drop_rate
A_ : int = qkv_bias
A_ : Dict = attn_drop_rate
A_ : List[Any] = drop_path_rate
A_ : Any = output_aa_attentions
A_ : Union[str, Any] = initializer_range
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase :Dict = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
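# The _LazyModule wiring above defers heavy imports: the package module is
# replaced by a proxy that only imports a submodule the first time one of its
# attributes is touched. A toy sketch of the idea, with illustrative names
# (the real transformers _LazyModule handles __dir__, pickling, and more):
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        submodule = self._attr_to_submodule[attr]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per name
        return value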
'''simple docstring'''
import math
from collections.abc import Callable
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : float = xa
A_ : float = xa
while True:
if x_n == x_na or function(lowerCamelCase__ ) == function(lowerCamelCase__ ):
raise ZeroDivisionError("""float division by zero, could not find root""" )
A_ : float = x_na - (
function(lowerCamelCase__ ) / ((function(lowerCamelCase__ ) - function(lowerCamelCase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
A_ : Tuple = x_na
A_ : List[Any] = x_na
def a ( lowerCamelCase__ ):
'''simple docstring'''
return math.pow(lowerCamelCase__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
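# The loop above is the secant method: replace (x_n, x_{n+1}) with
# (x_{n+1}, x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)))
# until successive iterates agree. A compact sketch with illustrative names:
from collections.abc import Callable
def secant_root(f: Callable[[float], float], xa: float, xb: float, tol: float = 1e-5) -> float:
    while abs(xb - xa) >= tol:
        fa, fb = f(xa), f(xb)
        if fb == fa:
            raise ZeroDivisionError("flat secant, cannot divide")
        xa, xb = xb, xb - fb * (xb - xa) / (fb - fa)
    return xb
# root of the same cubic x^3 - 2x - 5 used in the demo, near 2.0946:
print(secant_root(lambda x: x ** 3 - 2 * x - 5, 3.0, 3.5))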
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : BigBirdConfig
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
def _a (self ):
super().setup()
A_ : int = nn.Dense(5 , dtype=self.dtype )
def __call__(self , *lowercase , **lowercase ):
A_ : str = super().__call__(*lowercase , **lowercase )
A_ : str = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = FlaxBigBirdForNaturalQuestionsModule
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def cross_entropy(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
A_ : Tuple = logits.shape[-1]
A_ : Dict = (labels[..., None] == jnp.arange(lowerCamelCase__ )[None]).astype("""f4""" )
A_ : List[str] = jax.nn.log_softmax(lowerCamelCase__ , axis=-1 )
A_ : List[str] = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
A_ : List[str] = reduction(lowerCamelCase__ )
return loss
A_ : List[Any] = partial(lowerCamelCase__ , reduction=jnp.mean )
A_ : Tuple = cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
A_ : int = cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
A_ : str = cross_entropy(lowerCamelCase__ , lowerCamelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : str = "google/bigbird-roberta-base"
__SCREAMING_SNAKE_CASE : int = 3_000
__SCREAMING_SNAKE_CASE : int = 10_500
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 3
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 5
# tx_args
__SCREAMING_SNAKE_CASE : float = 3E-5
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : int = 20_000
__SCREAMING_SNAKE_CASE : float = 0.0_095
__SCREAMING_SNAKE_CASE : str = "bigbird-roberta-natural-questions"
__SCREAMING_SNAKE_CASE : str = "training-expt"
__SCREAMING_SNAKE_CASE : str = "data/nq-training.jsonl"
__SCREAMING_SNAKE_CASE : str = "data/nq-validation.jsonl"
def _a (self ):
os.makedirs(self.base_dir , exist_ok=lowercase )
A_ : Union[str, Any] = os.path.join(self.base_dir , self.save_dir )
A_ : Union[str, Any] = self.batch_size_per_device * jax.device_count()
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int = 4_096 # no dynamic padding on TPUs
def __call__(self , lowercase ):
A_ : Optional[Any] = self.collate_fn(lowercase )
A_ : List[Any] = jax.tree_util.tree_map(lowercase , lowercase )
return batch
def _a (self , lowercase ):
A_, A_ : Union[str, Any] = self.fetch_inputs(features["""input_ids"""] )
A_ : Tuple = {
"""input_ids""": jnp.array(lowercase , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(lowercase , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def _a (self , lowercase ):
A_ : int = [self._fetch_inputs(lowercase ) for ids in input_ids]
return zip(*lowercase )
def _a (self , lowercase ):
A_ : List[Any] = [1 for _ in range(len(lowercase ) )]
while len(lowercase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
'''simple docstring'''
if seed is not None:
A_ : Dict = dataset.shuffle(seed=lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) // batch_size ):
A_ : List[Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowerCamelCase__ )
@partial(jax.pmap , axis_name="""batch""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
def loss_fn(lowerCamelCase__ ):
A_ : List[Any] = model_inputs.pop("""start_labels""" )
A_ : List[str] = model_inputs.pop("""end_labels""" )
A_ : Tuple = model_inputs.pop("""pooled_labels""" )
A_ : int = state.apply_fn(**lowerCamelCase__ , params=lowerCamelCase__ , dropout_rng=lowerCamelCase__ , train=lowerCamelCase__ )
A_, A_, A_ : Union[str, Any] = outputs
return state.loss_fn(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
A_, A_ : List[str] = jax.random.split(lowerCamelCase__ )
A_ : Optional[Any] = jax.value_and_grad(lowerCamelCase__ )
A_, A_ : Union[str, Any] = grad_fn(state.params )
A_ : Dict = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
A_ : Optional[Any] = jax.lax.pmean(lowerCamelCase__ , """batch""" )
A_ : Optional[Any] = state.apply_gradients(grads=lowerCamelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def a ( lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
A_ : str = model_inputs.pop("""start_labels""" )
A_ : int = model_inputs.pop("""end_labels""" )
A_ : List[str] = model_inputs.pop("""pooled_labels""" )
A_ : Any = state.apply_fn(**lowerCamelCase__ , params=state.params , train=lowerCamelCase__ )
A_, A_, A_ : List[str] = outputs
A_ : str = state.loss_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : int = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class _lowerCAmelCase ( train_state.TrainState ):
__SCREAMING_SNAKE_CASE : Callable = struct.field(pytree_node=__UpperCAmelCase )
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Args
__SCREAMING_SNAKE_CASE : Callable
__SCREAMING_SNAKE_CASE : Callable
__SCREAMING_SNAKE_CASE : Callable
__SCREAMING_SNAKE_CASE : Callable
__SCREAMING_SNAKE_CASE : wandb
__SCREAMING_SNAKE_CASE : Callable = None
def _a (self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Optional[Any] = model.params
A_ : Dict = TrainState.create(
apply_fn=model.__call__ , params=lowercase , tx=lowercase , loss_fn=lowercase , )
if ckpt_dir is not None:
A_, A_, A_, A_, A_ : Union[str, Any] = restore_checkpoint(lowercase , lowercase )
A_ : Optional[Any] = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
A_, A_ : Dict = build_tx(**lowercase )
A_ : Union[str, Any] = train_state.TrainState(
step=lowercase , apply_fn=model.__call__ , params=lowercase , tx=lowercase , opt_state=lowercase , )
A_ : int = args
A_ : List[Any] = data_collator
A_ : Union[str, Any] = lr
A_ : Optional[int] = params
A_ : int = jax_utils.replicate(lowercase )
return state
def _a (self , lowercase , lowercase , lowercase ):
A_ : Optional[int] = self.args
A_ : Union[str, Any] = len(lowercase ) // args.batch_size
A_ : Dict = jax.random.PRNGKey(0 )
A_ : List[Any] = jax.random.split(lowercase , jax.device_count() )
for epoch in range(args.max_epochs ):
A_ : int = jnp.array(0 , dtype=jnp.floataa )
A_ : int = get_batched_dataset(lowercase , args.batch_size , seed=lowercase )
A_ : List[str] = 0
for batch in tqdm(lowercase , total=lowercase , desc=F'Running EPOCH-{epoch}' ):
A_ : Any = self.data_collator(lowercase )
A_, A_, A_ : Dict = self.train_step_fn(lowercase , lowercase , **lowercase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
A_ : Optional[int] = jax_utils.unreplicate(state.step )
A_ : str = running_loss.item() / i
A_ : Union[str, Any] = self.scheduler_fn(state_step - 1 )
A_ : Optional[int] = self.evaluate(lowercase , lowercase )
A_ : Optional[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(lowercase ) )
self.logger.log(lowercase , commit=lowercase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=lowercase )
def _a (self , lowercase , lowercase ):
A_ : Dict = get_batched_dataset(lowercase , self.args.batch_size )
A_ : Tuple = len(lowercase ) // self.args.batch_size
A_ : int = jnp.array(0 , dtype=jnp.floataa )
A_ : Tuple = 0
for batch in tqdm(lowercase , total=lowercase , desc="""Evaluating ... """ ):
A_ : Union[str, Any] = self.data_collator(lowercase )
A_ : List[str] = self.val_step_fn(lowercase , **lowercase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def _a (self , lowercase , lowercase ):
A_ : Optional[int] = jax_utils.unreplicate(lowercase )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """ )
self.model_save_fn(lowercase , params=state.params )
with open(os.path.join(lowercase , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowercase , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(lowercase , """data_collator.joblib""" ) )
with open(os.path.join(lowercase , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , lowercase )
print("""DONE""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(lowerCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f:
A_ : List[Any] = from_bytes(state.params , f.read() )
with open(os.path.join(lowerCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f:
A_ : Optional[int] = from_bytes(state.opt_state , f.read() )
A_ : Any = joblib.load(os.path.join(lowerCamelCase__ , """args.joblib""" ) )
A_ : Union[str, Any] = joblib.load(os.path.join(lowerCamelCase__ , """data_collator.joblib""" ) )
with open(os.path.join(lowerCamelCase__ , """training_state.json""" ) , """r""" ) as f:
A_ : int = json.load(lowerCamelCase__ )
A_ : Tuple = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = num_train_steps - warmup_steps
A_ : List[Any] = optax.linear_schedule(init_value=lowerCamelCase__ , end_value=lowerCamelCase__ , transition_steps=lowerCamelCase__ )
A_ : Union[str, Any] = optax.linear_schedule(init_value=lowerCamelCase__ , end_value=1E-7 , transition_steps=lowerCamelCase__ )
A_ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def weight_decay_mask(lowerCamelCase__ ):
A_ : List[Any] = traverse_util.flatten_dict(lowerCamelCase__ )
A_ : Optional[Any] = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(lowerCamelCase__ )
A_ : int = scheduler_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : Tuple = optax.adamw(learning_rate=lowerCamelCase__ , weight_decay=lowerCamelCase__ , mask=lowerCamelCase__ )
return tx, lr
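# The schedule assembled above is a linear warmup from init_lr to lr over
# warmup_steps, joined to a linear decay from lr down to 1e-7 over the
# remaining steps (see the two optax.linear_schedule calls). A dependency-free
# sketch of the same piecewise curve, with illustrative names:
def piecewise_linear_lr(step, init_lr, peak_lr, warmup_steps, total_steps):
    if step < warmup_steps:
        return init_lr + (peak_lr - init_lr) * step / warmup_steps  # ramp up
    frac = min((step - warmup_steps) / (total_steps - warmup_steps), 1.0)
    return peak_lr + (1e-7 - peak_lr) * frac  # ramp down to ~0
for s in (0, 10_000, 20_000, 60_000, 100_000):
    print(s, piecewise_linear_lr(s, 0.0, 3e-5, 20_000, 100_000))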
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
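# The preprocess pipeline above is: optional RGB conversion -> resize
# (shortest edge) -> center crop -> rescale by 1/255 -> per-channel normalize
# -> CHW layout. A numpy-only sketch of the two numeric steps, assuming an
# HWC uint8 input; the mean/std values are the OpenAI CLIP constants the
# snippet defaults to (quoted from memory, so verify against the library).
import numpy as np
CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])
def rescale_and_normalize(image_hwc: np.ndarray) -> np.ndarray:
    x = image_hwc.astype(np.float32) / 255.0  # rescale step
    x = (x - CLIP_MEAN) / CLIP_STD            # normalize step
    return np.transpose(x, (2, 0, 1))         # HWC -> CHW ("pixel_values")
# pixel_values = rescale_and_normalize(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))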
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1_00 , ):
'''simple docstring'''
A_ : Optional[int] = x_start
A_ : Dict = fnc(lowerCamelCase__ )
A_ : Union[str, Any] = 0.0
for _ in range(lowerCamelCase__ ):
# Approximate each small segment of the curve as linear and solve
# for the trapezoidal area
A_ : Optional[Any] = (x_end - x_start) / steps + xa
A_ : List[Any] = fnc(lowerCamelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
A_ : Tuple = xa
A_ : Optional[Any] = fxa
return area
if __name__ == "__main__":
def a ( lowerCamelCase__ ):
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCamelCase :Any = 1_0
while i <= 1_0_0_0_0_0:
print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 1_0
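# Note that abs() in the loop above makes this an unsigned-area estimate: for
# f(x) = x^3 + x^2 on [-5, 5] it converges to the area between curve and axis,
# roughly 312.67, not to the signed integral 250/3 ~= 83.33 (the x^3 term
# cancels by symmetry). A plain signed trapezoidal rule for comparison, with
# illustrative names:
def trapezoid(fnc, x_start, x_end, steps=100):
    h = (x_end - x_start) / steps
    xs = [x_start + i * h for i in range(steps + 1)]
    return sum((fnc(a) + fnc(b)) * h / 2 for a, b in zip(xs, xs[1:]))
print(trapezoid(lambda x: x ** 3 + x ** 2, -5, 5, 10_000))  # ~83.3333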
'''simple docstring'''
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : List[str] = name
A_ : Dict = value
A_ : Optional[int] = weight
def __repr__(self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _a (self ):
return self.value
def _a (self ):
return self.name
def _a (self ):
return self.weight
def _a (self ):
return self.value / self.weight
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = []
for i in range(len(lowerCamelCase__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ )
A_ : Any = []
A_, A_ : Tuple = 0.0, 0.0
for i in range(len(lowerCamelCase__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
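# A self-contained sketch of the greedy-by-density selection the routine above
# performs (sort by the caller-supplied key, e.g. the Things value/weight
# ratio, then pack until max_cost is reached); tuple layout and names are
# illustrative.
def greedy_by_density(items, max_cost):
    # items are (name, value, weight) tuples; densest first
    chosen, total_value, total_weight = [], 0.0, 0.0
    for name, value, weight in sorted(items, key=lambda t: t[1] / t[2], reverse=True):
        if total_weight + weight <= max_cost:
            chosen.append(name)
            total_weight += weight
            total_value += value
    return chosen, total_value
print(greedy_by_density([("a", 80, 40), ("b", 100, 60), ("c", 60, 10)], 60))
# densities are a: 2.0, b: 1.67, c: 6.0 -> picks c then a: (['c', 'a'], 140.0)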
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCamelCase :Optional[Any] = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
lowerCamelCase :Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( ):
'''simple docstring'''
A_ : Optional[int] = """https://pypi.org/pypi/diffusers/json"""
A_ : Any = json.loads(request.urlopen(lowerCamelCase__ ).read() )["""releases"""].keys()
return sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : version.Version(lowerCamelCase__ ) )
def a ( ):
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
A_ : Optional[Any] = Path(lowerCamelCase__ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def a ( lowerCamelCase__ ):
'''simple docstring'''
init_hf_modules()
A_ : Union[str, Any] = Path(lowerCamelCase__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
A_ : List[Any] = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
A_ : str = f.read()
# Imports of the form `import .xxx`
A_ : str = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , lowerCamelCase__ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , lowerCamelCase__ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase__ ) )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = False
A_ : Tuple = [module_file]
A_ : List[Any] = []
# Let's recurse through all relative imports
while not no_change:
A_ : Optional[Any] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase__ ) )
A_ : Any = Path(lowerCamelCase__ ).parent
A_ : Optional[int] = [str(module_path / m ) for m in new_imports]
A_ : List[Any] = [f for f in new_import_files if f not in all_relative_imports]
A_ : List[str] = [f'{f}.py' for f in new_import_files]
A_ : List[str] = len(lowerCamelCase__ ) == 0
all_relative_imports.extend(lowerCamelCase__ )
return all_relative_imports
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
A_ : Optional[Any] = f.read()
# Imports of the form `import xxx`
A_ : Optional[int] = re.findall(r"""^\s*import\s+(\S+)\s*$""" , lowerCamelCase__ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , lowerCamelCase__ , flags=re.MULTILINE )
# Only keep the top-level module
A_ : Optional[Any] = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
A_ : Optional[Any] = list(set(lowerCamelCase__ ) )
A_ : List[Any] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase__ )
except ImportError:
missing_packages.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
f'{", ".join(lowerCamelCase__ )}. Run `pip install {" ".join(lowerCamelCase__ )}`' )
return get_relative_imports(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = module_path.replace(os.path.sep , """.""" )
A_ : Dict = importlib.import_module(lowerCamelCase__ )
if class_name is None:
return find_pipeline_class(lowerCamelCase__ )
return getattr(lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
from ..pipelines import DiffusionPipeline
A_ : List[str] = dict(inspect.getmembers(lowerCamelCase__ , inspect.isclass ) )
A_ : Any = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
f' {loaded_module}.' )
A_ : Tuple = cls
return pipeline_class
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , ):
'''simple docstring'''
A_ : Optional[Any] = str(lowerCamelCase__ )
A_ : List[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if os.path.isfile(lowerCamelCase__ ):
A_ : Union[str, Any] = module_file_or_url
A_ : Optional[Any] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
A_ : Union[str, Any] = get_diffusers_versions()
# cut ".dev0"
A_ : Dict = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
A_ : Optional[Any] = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
A_ : Union[str, Any] = f'v{revision}'
elif revision == "main":
A_ : List[str] = revision
else:
raise ValueError(
f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
f' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
A_ : Dict = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase__ , pipeline=lowerCamelCase__ )
try:
A_ : int = cached_download(
lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , proxies=lowerCamelCase__ , resume_download=lowerCamelCase__ , local_files_only=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , )
A_ : Dict = """git"""
A_ : Any = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
A_ : List[str] = hf_hub_download(
lowerCamelCase__ , lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , proxies=lowerCamelCase__ , resume_download=lowerCamelCase__ , local_files_only=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , )
A_ : Dict = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
A_ : str = check_imports(lowerCamelCase__ )
# Now we move the module inside our cached dynamic modules.
A_ : Optional[int] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase__ )
A_ : Dict = Path(lowerCamelCase__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase__ , submodule_path / module_file )
for module_needed in modules_needed:
A_ : int = f'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : List[Any] = use_auth_token
elif use_auth_token is True:
A_ : List[Any] = HfFolder.get_token()
else:
A_ : Optional[Any] = None
A_ : int = model_info(lowerCamelCase__ , revision=lowerCamelCase__ , token=lowerCamelCase__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A_ : Any = submodule_path / commit_hash
A_ : str = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase__ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase__ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase__ , f'{module_needed}.py' , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , resume_download=lowerCamelCase__ , proxies=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , revision=lowerCamelCase__ , local_files_only=lowerCamelCase__ , )
return os.path.join(lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , **lowerCamelCase__ , ):
'''simple docstring'''
A_ : Union[str, Any] = get_cached_module_file(
lowerCamelCase__ , lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , resume_download=lowerCamelCase__ , proxies=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , revision=lowerCamelCase__ , local_files_only=lowerCamelCase__ , )
return get_class_in_module(lowerCamelCase__ , final_module.replace(""".py""" , """""" ) )
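# The import-scanning regexes above operate on raw source text, not on the
# AST: "import .xxx" and "from .xxx import yyy" lines yield relative imports
# to fetch recursively, while absolute imports are checked against the
# environment instead. A standalone demo (the sample source is made up):
import re
sample_source = """
from .utils import helper
import .pipeline_a
from transformers import AutoModel
"""
rel = re.findall(r"^\s*import\s+\.(\S+)\s*$", sample_source, flags=re.MULTILINE)
rel += re.findall(r"^\s*from\s+\.(\S+)\s+import", sample_source, flags=re.MULTILINE)
print(sorted(set(rel)))  # ['pipeline_a', 'utils'] -- the absolute import is ignored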
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCamelCase :int = logging.getLogger(__name__)
lowerCamelCase :List[Any] = 5_0 # max width of layer names
lowerCamelCase :List[Any] = 7_0 # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
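# Everything above revolves around an "amax" (the max absolute value, per
# tensor or per axis) that fixes the quantization scale. A numpy sketch of the
# core fake-quant math for 8-bit symmetric quantization, with illustrative
# names; pytorch_quantization's real TensorQuantizer kernels are more
# involved.
import numpy as np
def fake_quant(x, amax, num_bits=8):
    bound = 2 ** (num_bits - 1) - 1  # 127 for 8 bits
    scale = amax / bound
    q = np.clip(np.round(x / scale), -bound, bound)  # quantize to integers
    return q * scale                                 # dequantize back to float
x = np.array([-1.5, -0.2, 0.0, 0.7, 1.5])
print(fake_quant(x, amax=float(np.abs(x).max())))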
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = list(range(len(lowerCamelCase__ ) ) )
A_ : Optional[Any] = [v / w for v, w in zip(lowerCamelCase__ , lowerCamelCase__ )]
index.sort(key=lambda lowerCamelCase__ : ratio[i] , reverse=lowerCamelCase__ )
A_ : float = 0
A_ : list[float] = [0] * len(lowerCamelCase__ )
for i in index:
if weight[i] <= capacity:
A_ : List[str] = 1
max_value += value[i]
capacity -= weight[i]
else:
A_ : List[str] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
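# Worked example for the fractional knapsack above: values [60, 100, 120],
# weights [10, 20, 30], capacity 50 give the classic optimum 240 (items 0 and
# 1 taken whole, plus 2/3 of item 2). A compact standalone version with
# illustrative names:
def fractional_knapsack(value, weight, capacity):
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    fractions = [0.0] * len(value)
    total = 0.0
    for i in order:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            total += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]  # take a fraction of the last item
            total += value[i] * fractions[i]
            break
    return total, fractions
print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # (240.0, [1.0, 1.0, 0.666...])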
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # If remote code is not set, the default is to use the locally registered classes
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[str] = tempfile.mkdtemp()
# fmt: off
A_ : List[Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A_ : List[Any] = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : str = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A_ : int = {"""unk_token""": """<unk>"""}
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
A_ : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : int = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowercase , lowercase )
def _a (self , **lowercase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowercase )
def _a (self , **lowercase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowercase )
def _a (self , **lowercase ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : Any = self.get_tokenizer()
A_ : str = self.get_rust_tokenizer()
A_ : List[str] = self.get_image_processor()
A_ : List[str] = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
A_ : int = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def _a (self ):
A_ : int = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : int = self.get_image_processor(do_normalize=lowercase )
A_ : Tuple = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def _a (self ):
A_ : Union[str, Any] = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : Any = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : List[str] = image_processor(lowercase , return_tensors="""np""" )
A_ : Tuple = processor(images=lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : Optional[Any] = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Tuple = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : str = """lower newer"""
A_ : Union[str, Any] = processor(text=lowercase , return_tensors="""np""" )
A_ : Tuple = tokenizer(lowercase , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _a (self ):
A_ : Optional[int] = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Dict = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Union[str, Any] = """lower newer"""
A_ : Dict = self.prepare_image_inputs()
A_ : List[Any] = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = """google/owlvit-base-patch32"""
A_ : Dict = OwlViTProcessor.from_pretrained(lowercase )
A_ : List[Any] = ["""cat""", """nasa badge"""]
A_ : List[Any] = processor(text=lowercase )
A_ : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : str = """google/owlvit-base-patch32"""
A_ : Union[str, Any] = OwlViTProcessor.from_pretrained(lowercase )
A_ : Optional[Any] = [["""cat""", """nasa badge"""], ["""person"""]]
A_ : Tuple = processor(text=lowercase )
A_ : str = 16
A_ : int = len(lowercase )
A_ : int = max([len(lowercase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
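        # Hedged note: nested queries are padded to the longest inner list and then
        # flattened, hence the (batch_size * num_max_text_queries, seq_length) shape.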
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Optional[int] = """google/owlvit-base-patch32"""
A_ : Any = OwlViTProcessor.from_pretrained(lowercase )
A_ : Union[str, Any] = ["""cat""", """nasa badge"""]
A_ : Optional[int] = processor(text=lowercase )
A_ : str = 16
A_ : Optional[int] = inputs["""input_ids"""]
A_ : List[Any] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Dict = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : int = self.prepare_image_inputs()
A_ : List[str] = self.prepare_image_inputs()
A_ : Dict = processor(images=lowercase , query_images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Any = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(lowercase )
A_ : Union[str, Any] = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase ) | 667 |
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
        raise ValueError("""partitions cannot be greater than number_of_bytes!""" )
A_ : int = number_of_bytes // partitions
A_ : Union[str, Any] = []
for i in range(lowerCamelCase__ ):
A_ : Dict = i * bytes_per_partition + 1
A_ : Tuple = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
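# Hedged sketch with clean names (assumed equivalent to the routine above), plus a
# hand-computed check: 16_647 bytes over 4 partitions -> 16_647 // 4 = 4_161 bytes per
# partition, with the remainder folded into the last range.
def allocation_sketch(number_of_bytes: int, partitions: int) -> list[str]:
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list

assert allocation_sketch(16_647, 4) == ["1-4161", "4162-8322", "8323-12483", "12484-16647"]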
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Dict = 'BlipImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , lowercase , lowercase ):
A_ : str = False
super().__init__(lowercase , lowercase )
A_ : Union[str, Any] = self.image_processor
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A_ : List[str] = self.tokenizer
A_ : List[str] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
A_ : List[str] = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
A_ : Dict = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
A_ : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
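    # Hedged usage sketch (upstream this class is BlipProcessor; the model id below is
    # illustrative):
    #
    #     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    #     inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")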
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
A_ : Tuple = self.tokenizer.model_input_names
A_ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 667 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
A_ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
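# Hedged shape note for the splitter above: for hidden size H the fused attention
# weight has shape (3 * H, H); the three H-row slices become query, key and value, in
# that order.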
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = dct.pop(lowerCamelCase__ )
A_ : Optional[int] = val
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
A_ : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Optional[Any] = False
A_ : Union[str, Any] = """relu"""
A_ : List[str] = 10_24
A_ : Tuple = True
A_ : Tuple = False
A_ : List[str] = False
# load HuggingFace model
A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ : str = val
else:
A_ : List[str] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
A_ : str = ViTImageProcessor(size=encoder_config.image_size )
A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
A_ : Dict = outputs.logits
A_ : str = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
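# Hedged note: this file follows transformers' lazy-import pattern -- the mapping
# below names what each submodule exports, and _LazyModule (bound at the bottom of
# the file) defers the actual imports until first attribute access.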
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 |
'''simple docstring'''
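# A quine-style one-liner: the %r conversion re-embeds the template string, quotes
# and all, so the printed line is itself a program that reproduces itself.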
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 1 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Dict = 0
A_ : Tuple = [0]
A_ : int = [0]
A_ : str = len(lowercase )
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase ) , 0 )
A_ : Any = [60]
A_ : Optional[int] = [10]
A_ : str = len(lowercase )
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase ) , 0 )
def _a (self ):
A_ : Optional[Any] = 3
A_ : Optional[int] = [1, 2, 3]
A_ : Optional[int] = [3, 2, 1]
A_ : List[Any] = len(lowercase )
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase ) , 5 )
def _a (self ):
A_ : Any = 50
A_ : List[str] = [60, 100, 120]
A_ : Dict = [10, 20, 30]
A_ : Optional[int] = len(lowercase )
self.assertEqual(k.knapsack(lowercase , lowercase , lowercase , lowercase ) , 220 )
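# Hedged reference DP with clean names (assumption: k.knapsack solves the 0/1
# knapsack and returns the best achievable value), checked against the 220 case
# exercised above.
def knapsack_dp_sketch(capacity: int, weights: list[int], values: list[int]) -> int:
    best = [0] * (capacity + 1)
    for w, v in zip(weights, values):
        for c in range(capacity, w - 1, -1):  # go downwards so each item is used at most once
            best[c] = max(best[c], best[c - w] + v)
    return best[capacity]

assert knapsack_dp_sketch(50, [10, 20, 30], [60, 100, 120]) == 220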
if __name__ == "__main__":
unittest.main() | 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # change_contrast returns a PIL Image, so assert on its repr rather than pixel data
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
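    # Hedged note: this kernel sums to zero (4 * 0.25 + 4 * 0.5 - 3 == 0), so it acts
    # as a Laplacian-style edge detector with zero response on flat regions.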
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert lbp_image.any() | 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase :int = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Dict = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
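# Hedged usage sketch for the context manager defined below (upstream this module is
# datasets' patching utility, where the class is named patch_submodule; names here
# are illustrative):
#
#     with patch_submodule(some_module, "os.path.join", mocked_join):
#         some_module.function_that_calls_join()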
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 1 |
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
A_ : Dict = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A_ : List[str] = 1
if upper_limit > 0:
A_ : Any = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCamelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
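# Hedged sketch with clean names (assumed equivalent to the routine above), plus a
# closed-form cross-check: the n-th Catalan number also equals C(2n, n) // (n + 1).
import math

def catalan_numbers_sketch(upper_limit: int) -> list[int]:
    catalan_list = [0] * (upper_limit + 1)
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list

assert catalan_numbers_sketch(5) == [1, 1, 2, 5, 14, 42]
assert all(c == math.comb(2 * i, i) // (i + 1) for i, c in enumerate(catalan_numbers_sketch(10)))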
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
lowerCamelCase :Any = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 667 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 1 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def a ( lowerCamelCase__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( ):
'''simple docstring'''
A_ : Tuple = 2
while True:
if is_prime(lowerCamelCase__ ):
yield num
num += 1
def a ( lowerCamelCase__ = 2_00_00_00 ):
'''simple docstring'''
return sum(takewhile(lambda lowerCamelCase__ : x < n , prime_generator() ) )
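# Hedged cross-check with clean names (assumed equivalent to the trial-division path
# above): a small sieve of Eratosthenes agrees on tiny limits, e.g. the primes below
# 10 sum to 2 + 3 + 5 + 7 = 17 and the primes below 100 sum to 1060.
def sieve_sum_below(n: int) -> int:
    is_composite = [False] * n
    total = 0
    for p in range(2, n):
        if not is_composite[p]:
            total += p
            for multiple in range(p * p, n, p):
                is_composite[multiple] = True
    return total

assert sieve_sum_below(10) == 17
assert sieve_sum_below(100) == 1060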
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
        # create an ImageNet label -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
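    # Hedged usage sketch (assumption: this mirrors diffusers' DiTPipeline, where the
    # method above is named get_label_ids):
    #
    #     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    #     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    #     images = pipe(class_labels=class_ids, guidance_scale=4.0).images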
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _lowerCAmelCase ( __UpperCAmelCase ):
@require_torch
def _a (self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it is too late to do that inside pytest - so we
        # change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A_ : Optional[int] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ : Tuple = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ : Any = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ : Union[str, Any] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowercase )
BertModel.from_pretrained(lowercase )
BertTokenizer.from_pretrained(lowercase )
pipeline(task="""fill-mask""" , model=lowercase )
# baseline - just load from_pretrained with normal network
A_ : Any = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ : Optional[int] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ : Dict = """1"""
A_ : str = subprocess.run(lowercase , env=lowercase , check=lowercase , capture_output=lowercase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
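    # Hedged note: the subprocess dance above emulates what users get by exporting
    # TRANSFORMERS_OFFLINE=1 (or HF_HUB_OFFLINE=1) before the library is imported.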
@require_torch
def _a (self ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A_ : Tuple = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ : Dict = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ : List[str] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ : Any = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowercase )
BertModel.from_pretrained(lowercase )
BertTokenizer.from_pretrained(lowercase )
pipeline(task="""fill-mask""" , model=lowercase )
# baseline - just load from_pretrained with normal network
A_ : Any = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ : Tuple = self.get_env()
A_ : List[str] = subprocess.run(lowercase , env=lowercase , check=lowercase , capture_output=lowercase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def _a (self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it is too late to do that inside pytest - so we
        # change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A_ : Tuple = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
A_ : Union[str, Any] = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
A_ : str = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
A_ : Optional[Any] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ : Union[str, Any] = self.get_env()
A_ : Any = subprocess.run(lowercase , env=lowercase , check=lowercase , capture_output=lowercase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
A_ : int = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ : List[str] = """1"""
A_ : Any = subprocess.run(lowercase , env=lowercase , check=lowercase , capture_output=lowercase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def _a (self ):
A_ : Any = """
from transformers import pipeline
"""
A_ : Union[str, Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
A_ : Dict = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
A_ : str = self.get_env()
A_ : Optional[Any] = """1"""
A_ : Union[str, Any] = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
A_ : Union[str, Any] = subprocess.run(lowercase , env=lowercase , check=lowercase , capture_output=lowercase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def _a (self ):
A_ : Dict = """
from transformers import AutoModel
"""
A_ : Optional[Any] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
A_ : Tuple = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ : Tuple = self.get_env()
A_ : int = subprocess.run(lowercase , env=lowercase , check=lowercase , capture_output=lowercase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ : str = """1"""
A_ : List[Any] = subprocess.run(lowercase , env=lowercase , check=lowercase , capture_output=lowercase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() ) | 667 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
    '''simple docstring'''
    # expected number of distinct colours when drawing balls without replacement
    total = math.comb(NUM_BALLS , lowerCamelCase__ )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
    print(a(2_0)) | 667 | 1 |
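The closed form above uses complementary counting: each colour is missing from a 20-ball draw with probability C(60, 20)/C(70, 20), and expectations add by linearity. As a sanity check, a hypothetical Monte Carlo simulation of the same draw (not part of the original solution):

import random

def simulate(trials=50_000):
    balls = [colour for colour in range(7) for _ in range(10)]  # 7 colours x 10 balls each
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(balls, 20)))  # distinct colours in one draw
    return seen / trials

print(simulate())  # should hover around 6.818741802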
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = 'upernet'
def __init__(self , lowercase=None , lowercase=512 , lowercase=0.02 , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=384 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A_ : int = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(lowercase , lowercase ):
A_ : List[Any] = backbone_config.get("""model_type""" )
A_ : List[Any] = CONFIG_MAPPING[backbone_model_type]
A_ : str = config_class.from_dict(lowercase )
A_ : str = backbone_config
A_ : str = hidden_size
A_ : str = initializer_range
A_ : List[str] = pool_scales
A_ : List[Any] = use_auxiliary_head
A_ : Dict = auxiliary_loss_weight
A_ : Any = auxiliary_in_channels
A_ : Any = auxiliary_channels
A_ : Optional[int] = auxiliary_num_convs
A_ : int = auxiliary_concat_input
A_ : List[Any] = loss_ignore_index
def _a (self ):
A_ : Tuple = copy.deepcopy(self.__dict__ )
A_ : Any = self.backbone_config.to_dict()
A_ : int = self.__class__.model_type
return output | 667 |
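To show how the backbone_config plumbing above is meant to be used, here is a hedged sketch that swaps the default ResNet backbone for ConvNeXt; it assumes a transformers release that ships UperNetConfig and ConvNextConfig, and the label count is illustrative:

from transformers import ConvNextConfig, UperNetConfig

# Any backbone config exposing out_features can be plugged in.
backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone, num_labels=150)
print(config.backbone_config.model_type)  # "convnext"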
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 1 |
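The composite config above stitches a text and a vision sub-config together and mirrors the decoder's special token ids onto the top level. A small sketch of composing it from scratch (class and argument names follow the public transformers API; the sizes are arbitrary toy values):

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(hidden_size=64, num_layers=2, num_heads=2, d_kv=32, d_ff=128)
vision_cfg = Pix2StructVisionConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(config.text_config.num_layers, config.vision_config.num_hidden_layers)  # 2 2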
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
__SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
__SCREAMING_SNAKE_CASE : Any = PandasConfig
def _a (self ):
return datasets.DatasetInfo(features=self.config.features )
def _a (self , lowercase ):
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowercase , (str, list, tuple) ):
A_ : List[Any] = data_files
if isinstance(lowercase , lowercase ):
A_ : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ : int = [dl_manager.iter_files(lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A_ : List[str] = []
for split_name, files in data_files.items():
if isinstance(lowercase , lowercase ):
A_ : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ : Optional[int] = [dl_manager.iter_files(lowercase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowercase , gen_kwargs={"""files""": files} ) )
return splits
def _a (self , lowercase ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : Tuple = table_cast(lowercase , self.config.features.arrow_schema )
return pa_table
def _a (self , lowercase ):
for i, file in enumerate(itertools.chain.from_iterable(lowercase ) ):
with open(lowercase , """rb""" ) as f:
A_ : Dict = pa.Table.from_pandas(pd.read_pickle(lowercase ) )
yield i, self._cast_table(lowercase ) | 667 |
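The builder above boils down to two conversions per file: unpickle a pandas DataFrame, then hand it to Arrow (optionally casting to the declared features). The core round trip in isolation, without the datasets machinery:

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
table = pa.Table.from_pandas(df)  # the same conversion _generate_tables applies per pickle file
print(table.schema)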
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a ( ode_func , ya , xa , x_end , step_size ):
    '''simple docstring'''
    # Heun's method: an explicit Euler predictor followed by a trapezoidal corrector
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
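A quick numerical check of the integrator above, assuming it runs in the same module so the snippet's function a is in scope: for y' = y with y(0) = 1, the value at x = 1 should approach e.

def f(x, y):
    return y

ys = a(f, 1.0, 0.0, 1.0, 0.001)  # (ode_func, ya, xa, x_end, step_size)
print(ys[-1])  # ~2.71828, i.e. e, since Heun's method is second-order accurate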
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 1 |
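A hedged usage sketch for the processor above, with OCR disabled so words and boxes (normalized to the 0-1000 grid) are supplied manually; the checkpoint id is the usual public one and the inputs are illustrative:

from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
image = Image.new("RGB", (224, 224), "white")
words = ["hello", "world"]
boxes = [[10, 10, 60, 30], [70, 10, 130, 30]]  # one 0-1000 normalized box per word
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # attention_mask, bbox, input_ids, pixel_values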
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 |
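The patcher above generalizes the standard monkey-patching idea to renamed imports and nested submodules. The one-level version of the same idea, using only the stdlib, looks like this:

import os
from unittest import mock

# Temporarily replace os.path.join; code that did `from os.path import join`
# would NOT see this replacement, which is exactly the gap the patcher above closes.
with mock.patch("os.path.join", lambda *parts: "/".join(parts)):
    print(os.path.join("a", "b"))  # a/b on every platform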
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=3 , lowercase=32 , lowercase=3 , lowercase=10 , lowercase=[10, 20, 30, 40] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
A_ : List[str] = parent
A_ : Union[str, Any] = batch_size
A_ : str = image_size
A_ : Optional[int] = num_channels
A_ : str = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Union[str, Any] = depths
A_ : int = is_training
A_ : Any = use_labels
A_ : Optional[Any] = hidden_act
A_ : Optional[int] = num_labels
A_ : Dict = scope
A_ : Union[str, Any] = len(lowercase )
def _a (self ):
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def _a (self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : int = TFResNetModel(config=lowercase )
A_ : Union[str, Any] = model(lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Optional[Any] = self.num_labels
A_ : str = TFResNetForImageClassification(lowercase )
A_ : Optional[int] = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self ):
A_ : Any = self.prepare_config_and_inputs()
A_, A_, A_ : str = config_and_inputs
A_ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Any = False
def _a (self ):
A_ : Optional[Any] = TFResNetModelTester(self )
A_ : Any = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def _a (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a (self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def _a (self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(lowercase )
A_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[int] = [*signature.parameters.keys()]
A_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : int = model_class(lowercase )
A_ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_, A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Optional[Any] = layer_type
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : List[str] = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def _a (self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFResNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a ( ):
'''simple docstring'''
A_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a (self ):
A_ : Union[str, Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : List[Any] = self.default_image_processor
A_ : Tuple = prepare_img()
A_ : List[Any] = image_processor(images=lowercase , return_tensors="""tf""" )
# forward pass
A_ : Any = model(**lowercase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : List[str] = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase , atol=1E-4 ) ) | 667 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a ( ode_func , ya , xa , x_end , step_size ):
    '''simple docstring'''
    # Heun's method: an explicit Euler predictor followed by a trapezoidal corrector
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :Tuple = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :str = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[str] = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 |
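The _LazyModule indirection above defers every heavy import until an attribute is first touched. A standalone sketch of the pattern with nothing but the stdlib (the names here are hypothetical, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # maps real module -> exported names
    def __getattr__(self, attr):
        for module, names in self._import_structure.items():
            if attr in names:
                # Import happens only now, on first attribute access.
                return getattr(importlib.import_module(module), attr)
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported only at this point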
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ )
if matches:
A_ : Optional[Any] = float(matches[1] )
A_ : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
A_ : Optional[Any] = 10_01
A_ : Union[str, Any] = """imagenet-1k-id2label.json"""
A_ : List[str] = """huggingface/label-files"""
A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()}
A_ : int = """background"""
A_ : List[str] = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def a ( ):
'''simple docstring'''
A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ )
# Load 🤗 model
A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
A_ : Any = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" )
A_ : List[str] = model(**lowerCamelCase__ )
A_ : Any = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
A_ : Any = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
A_ : Union[str, Any] = """google/""" + model_name
image_processor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :str = parser.parse_args()
a(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 1 |
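Once converted, the checkpoint is consumed like any other transformers model. A hedged inference sketch against the published checkpoint (requires network access to download it):

import requests
import torch
from PIL import Image
from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # predicted ImageNet label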
'''simple docstring'''
import argparse
import datetime
def a ( date_input ):
    '''simple docstring'''
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError("""Must be 10 characters long""" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 85_00:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
    # Response
    response = f'Your date {date_input}, is a {days[str(f )]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase :List[Any] = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
lowerCamelCase :str = parser.parse_args()
a(args.date_input) | 667 |
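Since the congruence is already validated against datetime.date inside the function, the stdlib gives a quick external cross-check (the dates below are illustrative):

import datetime

for s in ["01-31-2010", "02/01/2010", "11-26-2024"]:
    m, d, y = (int(p) for p in s.replace("/", "-").split("-"))
    print(s, "->", datetime.date(y, m, d).strftime("%A"))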
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowercase , lowercase )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 | 1 |
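The preset validation above only enforces the dimensionalities declared in preset_shape. A tiny sketch of a dict that would pass it (array sizes are placeholders; only the number of dimensions matters here):

import numpy as np

voice_preset = {
    "semantic_prompt": np.zeros(256, dtype=np.int64),     # must be 1-D
    "coarse_prompt": np.zeros((2, 128), dtype=np.int64),  # must be 2-D
    "fine_prompt": np.zeros((8, 128), dtype=np.int64),    # must be 2-D
}
preset_shape = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}
assert all(voice_preset[k].ndim == n for k, n in preset_shape.items())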
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _lowerCAmelCase :
def __init__(self , lowercase = "cpu" , lowercase = "openai/clip-vit-large-patch14" ):
A_ : Tuple = device
A_ : Union[str, Any] = CLIPTokenizerFast.from_pretrained(lowercase )
A_ : Any = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
A_ : List[Any] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
A_ : int = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A_ : Union[str, Any] = torchvision.transforms.Resize(224 )
A_ : Optional[int] = torchvision.transforms.CenterCrop(224 )
def _a (self , lowercase ):
A_ : Dict = self.resize(lowercase )
A_ : Union[str, Any] = self.center_crop(lowercase )
A_ : List[Any] = self.normalize(lowercase )
return images
def __call__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Any = self.tokenizer(text=lowercase , **lowercase )
A_ : Optional[int] = self.preprocess_img(lowercase )
A_ : int = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase=10 , lowercase=0.01 , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=False , lowercase=True , lowercase="image" , lowercase=True , lowercase=False , lowercase=False , lowercase=False , ):
super().__init__()
A_ : Optional[int] = None
A_ : str = device if device else get_device()
if vqgan:
A_ : List[Any] = vqgan
else:
A_ : str = load_vqgan(self.device , conf_path=lowercase , ckpt_path=lowercase )
self.vqgan.eval()
if clip:
A_ : List[Any] = clip
else:
A_ : List[str] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
A_ : Any = ProcessorGradientFlow(device=self.device )
A_ : Dict = iterations
A_ : List[Any] = lr
A_ : Optional[int] = log
A_ : Dict = make_grid
A_ : str = return_val
A_ : str = quantize
A_ : Optional[Any] = self.vqgan.decoder.z_shape
def _a (self , lowercase=None , lowercase=None , lowercase=5 , lowercase=True ):
A_ : Tuple = []
if output_path is None:
A_ : List[Any] = """./animation.gif"""
if input_path is None:
A_ : Tuple = self.save_path
A_ : Optional[int] = sorted(glob(input_path + """/*""" ) )
if not len(lowercase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(lowercase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
A_ : List[str] = total_duration / len(lowercase )
A_ : Optional[Any] = [frame_duration] * len(lowercase )
if extend_frames:
A_ : Optional[int] = 1.5
A_ : List[str] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(lowercase ) )
imageio.mimsave(lowercase , lowercase , duration=lowercase )
print(F'gif saved to {output_path}' )
def _a (self , lowercase=None , lowercase=None ):
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
A_ : int = preprocess(Image.open(lowercase ) , target_image_size=256 ).to(self.device )
A_ : Tuple = preprocess_vqgan(lowercase )
A_, *A_ : int = self.vqgan.encode(lowercase )
return z
def _a (self , lowercase ):
A_ : Optional[Any] = self.latent.detach().requires_grad_()
A_ : Union[str, Any] = base_latent + transform_vector
if self.quantize:
A_, *A_ : Any = self.vqgan.quantize(lowercase )
else:
A_ : Any = trans_latent
return self.vqgan.decode(lowercase )
def _a (self , lowercase , lowercase , lowercase=None ):
A_ : Optional[Any] = self.clip_preprocessor(text=lowercase , images=lowercase , return_tensors="""pt""" , padding=lowercase )
A_ : Union[str, Any] = self.clip(**lowercase )
A_ : Union[str, Any] = clip_outputs.logits_per_image
if weights is not None:
A_ : Optional[Any] = similarity_logits * weights
return similarity_logits.sum()
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""] , lowercase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
A_ : str = self._get_clip_similarity(neg_prompts["""prompts"""] , lowercase , weights=neg_prompts["""weights"""] )
else:
A_ : Tuple = torch.tensor([1] , device=self.device )
A_ : Optional[Any] = -torch.log(lowercase ) + torch.log(lowercase )
return loss
def _a (self , lowercase , lowercase , lowercase ):
A_ : Tuple = torch.randn_like(self.latent , requires_grad=lowercase , device=self.device )
A_ : Optional[int] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A_ : List[str] = self._add_vector(lowercase )
A_ : Dict = loop_post_process(lowercase )
A_ : List[Any] = self._get_CLIP_loss(lowercase , lowercase , lowercase )
print("""CLIP loss""" , lowercase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=lowercase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _a (self , lowercase , lowercase , lowercase ):
wandb.init(reinit=lowercase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
A_ : str = Image.open(lowercase )
A_ : Union[str, Any] = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(lowercase ) )
def _a (self , lowercase ):
if not prompts:
return []
A_ : int = []
A_ : Any = []
if isinstance(lowercase , lowercase ):
A_ : Tuple = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(lowercase , (tuple, list) ):
A_ : Dict = prompt[0]
A_ : Tuple = float(prompt[1] )
elif ":" in prompt:
A_, A_ : Union[str, Any] = prompt.split(""":""" )
A_ : List[str] = float(lowercase )
else:
A_ : Dict = prompt
A_ : Optional[int] = 1.0
processed_prompts.append(lowercase )
weights.append(lowercase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase , device=self.device ),
}
def _a (self , lowercase , lowercase=None , lowercase=None , lowercase=True , lowercase=False , lowercase=True , lowercase=True , lowercase=None , ):
if image_path:
A_ : List[Any] = self._get_latent(lowercase )
else:
A_ : Dict = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowercase , lowercase , lowercase )
assert pos_prompts, "You must provide at least one positive prompt."
A_ : List[Any] = self.process_prompts(lowercase )
A_ : Dict = self.process_prompts(lowercase )
if save_final and save_path is None:
A_ : str = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
A_ : str = save_path + """_""" + get_timestamp()
os.makedirs(lowercase )
A_ : List[Any] = save_path
A_ : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(lowercase ) )
A_ : Optional[Any] = loop_post_process(lowercase )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase , lowercase , lowercase ) ):
if show_intermediate:
show_pil(lowercase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(lowercase )} )
if show_final:
show_pil(lowercase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) ) | 667 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 1 |
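The save/load round trip these tests exercise can be reproduced standalone. A minimal sketch, assuming the tiny test checkpoints referenced above are reachable and that the mangled class names map to the real GPT2Tokenizer and BertTokenizerFast:

import tempfile
from transformers import AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor

tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
processor = InstructBlipProcessor(BlipImageProcessor(), tokenizer, qformer_tokenizer)
with tempfile.TemporaryDirectory() as tmp:
    processor.save_pretrained(tmp)
    reloaded = AutoProcessor.from_pretrained(tmp)
    print(type(reloaded).__name__)  # expected: InstructBlipProcessor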
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''Primality check by 6k +/- 1 trial division.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    '''Return the first n odd composites that cannot be written as prime + 2 * square.'''
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    '''Return the smallest odd composite that is not the sum of a prime and twice a square.'''
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = image_size
A_ : List[str] = patch_size
A_ : Tuple = num_channels
A_ : List[str] = max_token_length
A_ : int = num_character_labels
A_ : str = num_bpe_labels
A_ : Tuple = num_wordpiece_labels
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = mlp_ratio
A_ : str = distilled
A_ : Union[str, Any] = layer_norm_eps
A_ : str = drop_rate
A_ : int = qkv_bias
A_ : Dict = attn_drop_rate
A_ : List[Any] = drop_path_rate
A_ : Any = output_aa_attentions
A_ : Union[str, Any] = initializer_range | 667 | 1 |
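A hedged instantiation sketch for the config above: in released transformers this class is MgpstrConfig (availability depends on your installed version), with defaults mirroring alibaba-damo/mgp-str-base:

from transformers import MgpstrConfig  # assumption: present in your transformers install

config = MgpstrConfig(max_token_length=32)  # override one default (27 in the class above)
print(config.model_type)        # 'mgp-str'
print(config.max_token_length)  # 32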
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
    GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
        A_ : Optional[int] = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , x0: float , x1: float ) -> float:
    '''Find a real root of `function` with the secant method, seeded with x0 and x1.'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        x_n2: float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float ) -> float:
    '''Example function: x**3 - 2x - 5, with a real root near 2.0946.'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 1 |
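A usage note on the secant iteration above: f(x) = x**3 - 2x - 5 has a single real root near x = 2.0946, and the starting pair (3, 3.5) converges in a handful of steps because the denominator is a finite-difference estimate of f'. A small check:

root = intersection(f, 3.0, 3.5)
assert abs(f(root)) < 1e-3  # a step size below 1e-5 implies a small residual here
print(f"approximate root: {root:.5f}")  # about 2.09455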
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int ) -> int:
    '''Return the least count of perfect squares summing to `number`, via dynamic programming.'''
    if number != int(number ):
        raise ValueError("""the value of input must be a natural number""" )
    if number < 0:
        raise ValueError("""the value of input must not be a negative number""" )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
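Spot checks against known values for the dynamic programme above (Lagrange's four-square theorem bounds every answer by 4):

assert minimum_squares_to_represent_a_number(25) == 1  # 25 = 5**2
assert minimum_squares_to_represent_a_number(13) == 2  # 13 = 9 + 4
assert minimum_squares_to_represent_a_number(12) == 3  # 12 = 4 + 4 + 4
assert minimum_squares_to_represent_a_number(7) == 4   # 7 = 4 + 1 + 1 + 1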
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 | 1 |
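A hedged usage sketch for the preprocessing pipeline above. Its defaults (shortest-edge 224 resize, 224x224 center crop, OPENAI_CLIP mean/std, RGB conversion) match transformers.CLIPImageProcessor, used here as a stand-in for the mangled class name:

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor  # assumption: stands in for the class above

image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
processor = CLIPImageProcessor()
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)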
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__(self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=None , lowercase=True , ):
A_ : List[str] = size if size is not None else {"""shortest_edge""": 20}
A_ : List[str] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : int = parent
A_ : Tuple = batch_size
A_ : Optional[int] = num_channels
A_ : Optional[int] = image_size
A_ : int = min_resolution
A_ : List[str] = max_resolution
A_ : Optional[int] = do_resize
A_ : Union[str, Any] = size
A_ : Union[str, Any] = do_center_crop
A_ : Dict = crop_size
A_ : str = do_flip_channel_order
def _a (self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def _a (self ):
A_ : Any = MobileViTImageProcessingTester(self )
@property
def _a (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a (self ):
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
self.assertTrue(hasattr(lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowercase , """center_crop""" ) )
self.assertTrue(hasattr(lowercase , """do_flip_channel_order""" ) )
def _a (self ):
A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
A_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a (self ):
pass
def _a (self ):
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a (self ):
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a (self ):
# Initialize image_processing
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : str = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , ) | 667 |
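What the shape assertions above boil down to, as a standalone sketch (requires torch; MobileViT's processor resizes the shortest edge, center-crops, and flips RGB to BGR channel order instead of normalizing):

import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

proc = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
img = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
out = proc(img, return_tensors="pt").pixel_values
print(out.shape)  # torch.Size([1, 3, 18, 18])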
'''simple docstring'''
class Things:
    def __init__(self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__(self ):
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
    def get_value(self ):
        return self.value
    def get_name(self ):
        return self.name
    def get_weight(self ):
        return self.weight
    def value_weight(self ):
        return self.value / self.weight
def build_menu(name , value , weight ):
    '''Build a list of Things from parallel name/value/weight sequences.'''
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(item , max_cost , key_func ):
    '''Greedily take items in descending key_func order while total weight stays within max_cost.'''
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
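A usage sketch for the greedy picker above, with made-up menu data (all names and numbers here are illustrative):

food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 70]
foods = build_menu(food, value, weight)
taken, total_value = greedy(foods, 500, Things.get_value)  # maximize value within a 500-unit budget
print(taken, total_value)  # all four fit (total weight 210), total value 310.0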
'''simple docstring'''
def lucas_lehmer_test(p: int ) -> bool:
    '''Lucas-Lehmer primality test for the Mersenne number 2**p - 1 (p should itself be prime).'''
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1)) | 667 |
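Spot checks for the test above: 2**p - 1 is prime for the Mersenne exponents 3, 5, 7, 13, 17, 19, 31 and composite for 11, 23, 29 (e.g. 2**11 - 1 = 2047 = 23 * 89):

for p in (3, 5, 7, 13, 17, 19, 31):
    assert lucas_lehmer_test(p)
for p in (11, 23, 29):
    assert not lucas_lehmer_test(p)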
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
    def fusea(qq , qk , qv ):
        # Skip the triple entirely if any member has no calibrated amax buffer yet.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , """_amax""" ):
                print("""          WARNING: NO AMAX BUFFER""" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
            if not hasattr(mod._weight_quantizer , """_amax""" ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 1 |
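How these helpers are wired into a training script, as a hedged sketch: the function names below (add_arguments, set_default_quantizers, configure_model, enable_calibration, finish_calibration) follow the upstream quant_trainer module; in this dump they were all mangled to `a`, so treat the names as assumptions:

import argparse

parser = argparse.ArgumentParser()
add_arguments(parser)  # the first helper above: registers --wprec/--aprec/--calibrator etc.
args = parser.parse_args(["--aprec", "8", "--wprec", "8", "--calibrator", "max"])
# set_default_quantizers(args)              # pick calibrators and per-tensor/per-channel descriptors
# configure_model(model, args, calib=True)  # attach quantizers before calibration
# enable_calibration(model)                 # run a few batches to collect ranges
# finish_calibration(model, args)           # load amaxes, re-enable quantization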
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def calculate_impedance(resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    '''Solve |Z|**2 = R**2 + X**2 for whichever of the three arguments is passed as 0.'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
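A Pythagorean sanity check for the solver above: |Z| = sqrt(R**2 + X**2), so a 3-4-5 triple round-trips exactly:

assert calculate_impedance(3, 4, 0) == {"impedance": 5.0}
assert calculate_impedance(0, 4, 5) == {"resistance": 3.0}
assert calculate_impedance(3, 0, 5) == {"reactance": 4.0}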
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 1 |
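The registration pattern these tests rely on, reduced to a hedged standalone sketch (CustomConfig and CustomTokenizer here are toy stand-ins for the test_module classes imported above):

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class CustomConfig(PretrainedConfig):
    model_type = "custom"

class CustomTokenizer(PreTrainedTokenizer):
    pass

AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# AutoTokenizer.from_pretrained(...) will now resolve CustomConfig checkpoints to CustomTokenizer.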
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase :Dict = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = 0.9 , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = 1 / 255 , lowercase = True , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**lowercase )
A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : int = do_resize
A_ : Tuple = size
A_ : Optional[int] = crop_pct
A_ : Any = resample
A_ : Tuple = do_center_crop
A_ : List[Any] = crop_size
A_ : Tuple = do_rescale
A_ : Dict = rescale_factor
A_ : Optional[Any] = do_normalize
A_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a (self , lowercase , lowercase , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Optional[int] = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
A_ : str = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
A_ : List[Any] = int(size["""height"""] / crop_pct )
else:
A_ : Dict = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase ) )
A_ : Union[str, Any] = get_resize_output_image_size(lowercase , size=lowercase , default_to_square=lowercase )
else:
if "shortest_edge" in size:
A_ : str = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
elif "height" in size and "width" in size:
A_ : Optional[int] = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase ) )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Union[str, Any] = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : int = do_resize if do_resize is not None else self.do_resize
A_ : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
A_ : str = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
A_ : List[str] = image_mean if image_mean is not None else self.image_mean
A_ : Union[str, Any] = image_std if image_std is not None else self.image_std
A_ : Dict = size if size is not None else self.size
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A_ : List[str] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , crop_pct=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : List[str] = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : Any = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : str = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : int = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 |
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
A_ : int = number_of_bytes // partitions
A_ : Union[str, Any] = []
for i in range(lowerCamelCase__ ):
A_ : Dict = i * bytes_per_partition + 1
A_ : Tuple = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
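# Illustrative behaviour of the allocator above (hypothetical values): with
# number_of_bytes=16 and partitions=4 it returns ['1-4', '5-8', '9-12', '13-16'];
# with partitions=5 the final partition absorbs the remainder, giving
# ['1-3', '4-6', '7-9', '10-12', '13-16']. Ranges are 1-indexed and inclusive.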
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = KandinskyVaaImgaImgPipeline
__SCREAMING_SNAKE_CASE : Dict = ['image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : Optional[int] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : Tuple = False
@property
def _a (self ):
return 32
@property
def _a (self ):
return 32
@property
def _a (self ):
return self.time_input_dim
@property
def _a (self ):
return self.time_input_dim * 4
@property
def _a (self ):
return 100
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Dict = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : List[str] = UNetaDConditionModel(**lowercase )
return model
@property
def _a (self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a (self ):
torch.manual_seed(0 )
A_ : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def _a (self ):
A_ : Tuple = self.dummy_unet
A_ : Any = self.dummy_movq
A_ : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A_ : Optional[int] = DDIMScheduler(**lowercase )
A_ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase )
# create init_image
A_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ : Any = Image.fromarray(np.uinta(lowercase ) ).convert("""RGB""" ).resize((256, 256) )
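        # "mps" lacks support for device-local torch.Generator objects, hence the
        # fallback to the global torch.manual_seed on that backend below.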
if str(lowercase ).startswith("""mps""" ):
A_ : Dict = torch.manual_seed(lowercase )
else:
A_ : str = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : List[str] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _a (self ):
A_ : str = """cpu"""
A_ : Optional[int] = self.get_dummy_components()
A_ : Optional[int] = self.pipeline_class(**lowercase )
A_ : List[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Dict = pipe(**self.get_dummy_inputs(lowercase ) )
A_ : int = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
A_ : Union[str, Any] = image[0, -3:, -3:, -1]
A_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
A_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ : Union[str, Any] = """A red cartoon frog, 4k"""
A_ : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
A_ : Optional[Any] = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
A_ : Any = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
A_ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_, A_ : Optional[int] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
A_ : Optional[Any] = pipeline(
image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
A_ : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase , lowercase ) | 667 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
A_ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
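# Shape note (assuming a ViT-base encoder, hidden_size = 768): the fused qkv weight
# popped above has shape (3 * 768, 768) = (2304, 768); rows [0:768] become the query
# projection, rows [768:1536] the key projection and rows [1536:2304] the value
# projection.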
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = dct.pop(lowerCamelCase__ )
A_ : Optional[int] = val
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
A_ : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Optional[Any] = False
A_ : Union[str, Any] = """relu"""
A_ : List[str] = 10_24
A_ : Tuple = True
A_ : Tuple = False
A_ : List[str] = False
# load HuggingFace model
A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ : str = val
else:
A_ : List[str] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
A_ : str = ViTImageProcessor(size=encoder_config.image_size )
A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
A_ : Dict = outputs.logits
A_ : str = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 1 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase :int = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _lowerCAmelCase ( tr.AbstractTransform ):
def __init__(self , lowercase = " " ):
A_ : Optional[Any] = sentence_delimiter
def _a (self , lowercase ):
return list(lowercase )
def _a (self , lowercase ):
A_ : int = []
for sent_idx, sentence in enumerate(lowercase ):
chars.extend(self.process_string(lowercase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCamelCase :Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCamelCase :Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCamelCase :Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :List[str] = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
lowerCamelCase :Any = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def _a (self , lowercase , lowercase , lowercase=False ):
if concatenate_texts:
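            # jiwer reports the measure under the "wer" key, but with the
            # character-level transforms configured above the returned value is in
            # fact the character error rate.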
return jiwer.compute_measures(
lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , )["wer"]
A_ : Tuple = 0
A_ : List[Any] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Tuple = jiwer.compute_measures(
lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 667 |
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 1 |
'''simple docstring'''
import math
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowerCamelCase__ )
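# Worked check (illustrative): for k = 2, 4 * 2 + 1 = 9 and sqrt(9) = 3, so the
# exponent is log2(3 / 2 + 1 / 2) = log2(2) = 1 -- an integer, matching the perfect
# partition 4**1 = 2**1 + 2.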
def a ( lowerCamelCase__ = 1 / 1_23_45 ):
'''simple docstring'''
A_ : int = 0
A_ : List[str] = 0
A_ : str = 3
while True:
A_ : Tuple = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCamelCase__ ):
A_ : Any = int(lowerCamelCase__ )
total_partitions += 1
if check_partition_perfect(lowerCamelCase__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCamelCase__ )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # No reference image to compare against here, so sanity-check the repr of
        # the returned PIL image instead
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert lbp_image.any() | 667 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[str] = logging.get_logger(__name__)
# TODO Update this
lowerCamelCase :Dict = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = 'esm'
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase=0.1 , lowercase=0.1 , lowercase=1026 , lowercase=0.02 , lowercase=1E-12 , lowercase="absolute" , lowercase=True , lowercase=None , lowercase=False , lowercase=False , lowercase=None , lowercase=None , **lowercase , ):
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
A_ : str = vocab_size
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Dict = intermediate_size
A_ : Any = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : Tuple = max_position_embeddings
A_ : str = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[int] = position_embedding_type
A_ : Any = use_cache
A_ : Optional[int] = emb_layer_norm_before
A_ : Optional[Any] = token_dropout
A_ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
A_ : int = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
A_ : Any = EsmFoldConfig(**lowercase )
A_ : List[str] = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
A_ : Dict = get_default_vocab_list()
else:
A_ : int = vocab_list
else:
A_ : Union[str, Any] = None
A_ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def _a (self ):
A_ : Any = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
A_ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : "TrunkConfig" = None
def _a (self ):
if self.trunk is None:
A_ : Dict = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
A_ : List[Any] = TrunkConfig(**self.trunk )
def _a (self ):
A_ : Dict = asdict(self )
A_ : Union[str, Any] = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int = 48
__SCREAMING_SNAKE_CASE : int = 1_024
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Optional[int] = 128
__SCREAMING_SNAKE_CASE : "StructureModuleConfig" = None
def _a (self ):
if self.structure_module is None:
A_ : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
A_ : Tuple = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
A_ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
A_ : Dict = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.' )
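        # With the defaults above (sequence_state_dim=1024, sequence_head_width=32,
        # pairwise_state_dim=128, pairwise_head_width=32) these checks derive
        # 1024 // 32 = 32 sequence heads and 128 // 32 = 4 pairwise heads, and all
        # constraints pass. (Illustrative arithmetic only.)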
def _a (self ):
A_ : Dict = asdict(self )
A_ : Optional[Any] = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : int = 384
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 16
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 12
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : float = 0.1
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : int = 7
__SCREAMING_SNAKE_CASE : int = 10
__SCREAMING_SNAKE_CASE : float = 1E-8
__SCREAMING_SNAKE_CASE : float = 1E5
def _a (self ):
return asdict(self )
def a ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
) | 667 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 1 |
'''simple docstring'''
class _lowerCAmelCase : # Public class to implement a graph
def __init__(self , lowercase , lowercase , lowercase ):
A_ : Dict = row
A_ : Optional[Any] = col
A_ : Union[str, Any] = graph
def _a (self , lowercase , lowercase , lowercase ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def _a (self , lowercase , lowercase , lowercase ):
# Checking all 8 elements surrounding nth element
A_ : Union[str, Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
A_ : Tuple = [-1, 0, 1, -1, 1, -1, 0, 1]
A_ : Optional[Any] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowercase ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowercase )
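    # Illustrative example (not part of the original): a 3x3 grid such as
    # [[1, 0, 0], [0, 1, 0], [0, 0, 1]] counts as a single island here, because the
    # eight neighbour offsets above connect cells diagonally.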
def _a (self ): # And finally, count all islands.
A_ : Tuple = [[False for j in range(self.COL )] for i in range(self.ROW )]
A_ : Dict = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(lowercase , lowercase , lowercase )
count += 1
return count | 667 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
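# Standard lazy-import pattern: only the names registered in _import_structure are
# declared up front, and _LazyModule (bound at the bottom of this file) imports the
# heavy submodules on first attribute access.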
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = FunnelConfig.from_json_file(lowerCamelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
A_ : List[Any] = FunnelBaseModel(lowerCamelCase__ ) if base_model else FunnelModel(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
lowerCamelCase :List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
) | 667 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
# create a imagenet -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
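                # classifier-free guidance combine:
                # eps = eps_uncond + guidance_scale * (eps_cond - eps_uncond),
                # pushing the prediction away from the unconditional branch.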
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 1 |
'''simple docstring'''
lowerCamelCase :int = range(2, 2_0 + 1)
lowerCamelCase :Optional[int] = [1_0**k for k in range(ks[-1] + 1)]
lowerCamelCase :dict[int, dict[int, list[list[int]]]] = {}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = sum(a_i[j] for j in range(lowerCamelCase__ , len(lowerCamelCase__ ) ) )
A_ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase__ ) , lowerCamelCase__ ) ) )
A_, A_ : Tuple = 0, 0
A_ : List[str] = n - i
A_ : Optional[Any] = memo.get(lowerCamelCase__ )
if sub_memo is not None:
A_ : Dict = sub_memo.get(lowerCamelCase__ )
if jumps is not None and len(lowerCamelCase__ ) > 0:
# find and make the largest jump without going over
A_ : Any = -1
for _k in range(len(lowerCamelCase__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ : Tuple = _k
break
if max_jump >= 0:
A_, A_, A_ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ : str = diff + c
for j in range(min(lowerCamelCase__ , len(lowerCamelCase__ ) ) ):
A_, A_ : Union[str, Any] = divmod(lowerCamelCase__ , 10 )
if new_c > 0:
add(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = []
else:
A_ : Tuple = {c: []}
A_ : Optional[Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_, A_ : Tuple = next_term(lowerCamelCase__ , k - 1 , i + dn , lowerCamelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_, A_ : Optional[Any] = compute(lowerCamelCase__ , lowerCamelCase__ , i + dn , lowerCamelCase__ )
diff += _diff
dn += terms_jumped
A_ : Dict = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ : int = 0
while j < len(lowerCamelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase__ , (diff, dn, k) )
return (diff, dn)
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(lowerCamelCase__ ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ : Any = i
A_, A_, A_ : Union[str, Any] = 0, 0, 0
for j in range(len(lowerCamelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ : Tuple = ds_c + ds_b
diff += addend
A_ : str = 0
for j in range(lowerCamelCase__ ):
A_ : Optional[int] = a_i[j] + addend
A_, A_ : Optional[Any] = divmod(lowerCamelCase__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return diff, i - start_i
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for j in range(lowerCamelCase__ , len(lowerCamelCase__ ) ):
A_ : List[Any] = digits[j] + addend
if s >= 10:
A_, A_ : List[str] = divmod(lowerCamelCase__ , 10 )
A_ : Union[str, Any] = addend // 10 + quotient
else:
A_ : int = s
A_ : Tuple = addend // 10
if addend == 0:
break
while addend > 0:
A_, A_ : List[Any] = divmod(lowerCamelCase__ , 10 )
digits.append(lowerCamelCase__ )
def a ( lowerCamelCase__ = 10**15 ):
'''simple docstring'''
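    # Sequence being advanced (Project Euler problem 551): a(1) = 1 and
    # a(n) = a(n-1) + digit_sum(a(n-1)). `digits` holds the current term with the
    # least significant digit first, and next_term() skips many terms at once via
    # the memoised jumps built above.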
A_ : Optional[int] = [1]
A_ : Optional[int] = 1
A_ : Any = 0
while True:
A_, A_ : Union[str, Any] = next_term(lowerCamelCase__ , 20 , i + dn , lowerCamelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ : str = 0
for j in range(len(lowerCamelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
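# By linearity of expectation, the expected number of distinct colours drawn is
# NUM_COLOURS * P(a given colour appears)
#   = NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, n) / C(NUM_BALLS, n)),
# which is exactly what solution() evaluates below (Project Euler problem 493).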
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase = 16 , lowercase = 88 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = 32 , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = "geglu" , lowercase = None , ):
super().__init__()
A_ : Tuple = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
A_ : int = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
A_ : List[str] = [77, 257]
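        # 77 is plausibly a CLIP text sequence length and 257 a ViT image-embedding
        # length (16 * 16 patches + 1 class token); each length selects the slice of
        # encoder_hidden_states routed to one of the two transformers below.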
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
A_ : Any = [1, 0]
def _a (self , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase = True , ):
A_ : Tuple = hidden_states
A_ : Union[str, Any] = []
A_ : int = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
A_ : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
A_ : Any = self.transformer_index_for_condition[i]
A_ : Optional[Any] = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
A_ : Union[str, Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
A_ : Tuple = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase ) | 667 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
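    # The mapping above aliases generic config attribute names to this model's
    # T5-style field names, so e.g. config.num_attention_heads resolves to
    # config.num_heads.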
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class PixaStructConfig( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 1 |
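# A minimal usage sketch for the composite config defined above. The class names
# below are the upstream transformers Pix2Struct API and are an assumption here,
# since this snippet obfuscates its own identifiers.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=6)             # override one text default
vision_config = Pix2StructVisionConfig(num_hidden_layers=6)  # and one vision default
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
# to_dict() re-serializes the nested configs, mirroring the last method above
print(config.to_dict()["text_config"]["num_layers"])         # -> 6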
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
) | 667 |
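# A minimal sketch of what the _LazyModule indirection above buys: importing the
# package is cheap, and the heavy modeling submodule is only imported when one of
# its attributes is first accessed (assumes torch and transformers are installed).
import transformers

cls = transformers.MT5ForConditionalGeneration  # attribute access triggers the real import
print(cls.__module__)  # transformers.models.mt5.modeling_mt5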
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_audio_spectrogram_transformer'''] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_audio_spectrogram_transformer'''] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
A_ : Union[str, Any] = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) )
self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) )
def _a (self ):
A_ : List[str] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
A_ : Optional[Any] = get_activation("""gelu""" )
A_ : Optional[Any] = get_activation("""gelu_10""" )
A_ : Dict = torch_builtin(lowercase )
A_ : Tuple = geluaa(lowercase )
A_ : Tuple = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowercase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _a (self ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(lowercase ):
get_activation("""bogus""" )
with self.assertRaises(lowercase ):
get_activation(lowercase )
def _a (self ):
A_ : str = get_activation("""gelu""" )
A_ : int = 1
A_ : Dict = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowercase ):
A_ : List[Any] = acta.a | 667 |
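# A standalone sketch of the API the tests above exercise.
import torch
from transformers.activations import get_activation

act = get_activation("gelu")
x = torch.linspace(-3.0, 3.0, steps=7)
print(act(x))                     # element-wise GELU of x
print(get_activation("relu")(x))  # any name registered above works the same way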
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( ProcessorMixin ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Any = 'LayoutLMv3ImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self , lowercase=None , lowercase=None , **lowercase ):
A_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowercase , )
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowercase , lowercase )
def __call__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ : Optional[int] = self.image_processor(images=lowercase , return_tensors=lowercase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ : Dict = features["""words"""]
A_ : Optional[int] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
# add pixel values
A_ : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ : List[str] = self.get_overflowing_images(lowercase , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ : Optional[int] = images
return encoded_inputs
def _a (self , lowercase , lowercase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F' {len(lowercase )} and {len(lowercase )}' )
return images_with_overflow
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _a (self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase , )
return self.image_processor_class
@property
def _a (self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowercase , )
return self.image_processor | 667 | 1 |
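# A hedged usage sketch for the processor above; the checkpoint id is an
# assumption, and apply_ocr=True (the default) requires the Tesseract OCR backend.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")  # placeholder file name
encoding = processor(image, return_tensors="pt")   # OCR supplies the words and boxes
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values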
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = image_size
A_ : List[str] = patch_size
A_ : Tuple = num_channels
A_ : List[str] = max_token_length
A_ : int = num_character_labels
A_ : str = num_bpe_labels
A_ : Tuple = num_wordpiece_labels
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = mlp_ratio
A_ : str = distilled
A_ : Union[str, Any] = layer_norm_eps
A_ : str = drop_rate
A_ : int = qkv_bias
A_ : Dict = attn_drop_rate
A_ : List[Any] = drop_path_rate
A_ : Any = output_aa_attentions
A_ : Union[str, Any] = initializer_range | 667 |
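# A minimal sketch, assuming the upstream class name MgpstrConfig for the config
# defined above (the identifiers in this snippet are obfuscated).
from transformers import MgpstrConfig

config = MgpstrConfig(max_token_length=27, num_character_labels=38)
print(config.image_size, config.max_token_length)  # [32, 128] 27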
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _lowerCAmelCase ( ChunkPipeline ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowercase )
def _a (self , **lowercase ):
A_ : str = {}
A_ : Dict = {}
A_ : str = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ : Dict = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ : int = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ : Tuple = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ : Any = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ : Union[str, Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ : List[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ : Union[str, Any] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ):
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def _a (self , lowercase , lowercase=64 , lowercase = 0 , lowercase = 512 / 1500 , lowercase = 32 , lowercase = 1 , ):
A_ : Tuple = load_image(lowercase )
A_ : int = self.image_processor.size["""longest_edge"""]
A_, A_, A_, A_ : str = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Dict = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ : Optional[Any] = self.get_inference_context()
with inference_context():
A_ : str = self._ensure_tensor_on_device(lowercase , device=self.device )
A_ : Tuple = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ : Tuple = image_embeddings
A_ : Dict = grid_points.shape[1]
A_ : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
A_ : Tuple = grid_points[:, i : i + points_per_batch, :, :]
A_ : List[Any] = input_labels[:, i : i + points_per_batch]
A_ : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _a (self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ):
A_ : Any = model_inputs.pop("""input_boxes""" )
A_ : str = model_inputs.pop("""is_last""" )
A_ : int = model_inputs.pop("""original_sizes""" ).tolist()
A_ : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ : List[str] = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ : Optional[int] = model_outputs["""pred_masks"""]
A_ : Tuple = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
A_ : Union[str, Any] = model_outputs["""iou_scores"""]
A_, A_, A_ : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _a (self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ):
A_ : Tuple = []
A_ : Optional[Any] = []
A_ : str = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ : Any = torch.cat(lowercase )
A_ : List[Any] = torch.cat(lowercase )
A_, A_, A_, A_ : Optional[int] = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
A_ : int = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
A_ : Optional[int] = {}
if output_rle_mask:
A_ : List[str] = rle_mask
if output_bboxes_mask:
A_ : Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( ProcessorMixin ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowercase , lowercase )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 |
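# A hedged usage sketch; the checkpoint and voice-preset ids are assumptions taken
# from the public Bark release.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)  # padded to max_length=256, per __call__ above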
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a ( ode_func , ya , xa , step_size , x_end ):
    '''simple docstring'''
    # Heun's method (explicit trapezoidal rule): a second-order predictor-corrector.
    n = int(np.ceil((x_end - xa) / step_size ))
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor: one explicit Euler step
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
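# Quick check of the integrator above on dy/dx = y with y(0) = 1, whose exact
# solution is e**x; Heun's method is second order, so at step_size = 0.01 the
# endpoint should agree with e to roughly 1e-5.
import numpy as np

def f(x, y):
    return y

ys = a(f, 1.0, 0.0, 0.01, 1.0)  # ode_func, ya, xa, step_size, x_end
print(ys[-1], np.e)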
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0.2 , lowercase=0.2 ):
A_ : Union[str, Any] = bp_numa
A_ : List[str] = bp_numa
A_ : Any = bp_numa
A_ : str = conva_get[:2]
A_ : Optional[Any] = conva_get[2]
A_ : str = size_pa
A_ : Optional[int] = rate_w
A_ : Dict = rate_t
A_ : Tuple = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A_ : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ : Any = -2 * np.random.rand(self.conva[1] ) + 1
A_ : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
A_ : int = -2 * np.random.rand(self.num_bpa ) + 1
def _a (self , lowercase ):
# save model dict with pickle
A_ : Tuple = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowercase , """wb""" ) as f:
pickle.dump(lowercase , lowercase )
print(F'Model saved: {save_path}' )
@classmethod
def _a (cls , lowercase ):
# read saved model
with open(lowercase , """rb""" ) as f:
A_ : List[Any] = pickle.load(lowercase ) # noqa: S301
A_ : List[str] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
A_ : List[str] = model_dic.get("""size_pooling1""" )
A_ : Any = model_dic.get("""num_bp1""" )
A_ : str = model_dic.get("""num_bp2""" )
A_ : List[Any] = model_dic.get("""num_bp3""" )
A_ : Dict = model_dic.get("""rate_weight""" )
A_ : str = model_dic.get("""rate_thre""" )
# create model instance
A_ : Optional[Any] = CNN(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
# modify model parameter
A_ : Any = model_dic.get("""w_conv1""" )
A_ : Dict = model_dic.get("""wkj""" )
A_ : Union[str, Any] = model_dic.get("""vji""" )
A_ : List[str] = model_dic.get("""thre_conv1""" )
A_ : List[str] = model_dic.get("""thre_bp2""" )
A_ : Optional[int] = model_dic.get("""thre_bp3""" )
return conv_ins
def _a (self , lowercase ):
return 1 / (1 + np.exp(-1 * x ))
def _a (self , lowercase ):
return round(lowercase , 3 )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ):
# convolution process
A_ : Dict = convs[0]
A_ : List[Any] = convs[1]
A_ : int = np.shape(lowercase )[0]
# get the data slice of original image data, data_focus
A_ : Optional[int] = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase ):
A_ : Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase )
# calculate the feature map of every kernel, and save it as a list of matrices
A_ : Any = []
A_ : Optional[int] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowercase ):
A_ : Any = []
for i_focus in range(len(lowercase ) ):
A_ : str = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase ) )
A_ : Dict = np.asmatrix(lowercase ).reshape(
lowercase , lowercase )
data_featuremap.append(lowercase )
# expanding the data slice to one dimension
A_ : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase ) )
A_ : Tuple = np.asarray(lowercase )
return focus_list, data_featuremap
def _a (self , lowercase , lowercase , lowercase="average_pool" ):
# pooling process
A_ : List[Any] = len(featuremaps[0] )
A_ : str = int(size_map / size_pooling )
A_ : Union[str, Any] = []
for i_map in range(len(lowercase ) ):
A_ : Tuple = featuremaps[i_map]
A_ : Optional[int] = []
for i_focus in range(0 , lowercase , lowercase ):
for j_focus in range(0 , lowercase , lowercase ):
A_ : Union[str, Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase ) )
A_ : Union[str, Any] = np.asmatrix(lowercase ).reshape(lowercase , lowercase )
featuremap_pooled.append(lowercase )
return featuremap_pooled
def _a (self , lowercase ):
# expanding three-dimensional data into a one-dimensional list
A_ : List[Any] = []
for i in range(len(lowercase ) ):
A_ : Optional[int] = np.shape(data[i] )
A_ : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
A_ : Union[str, Any] = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase )
A_ : List[str] = np.asarray(lowercase )
return data_expanded
def _a (self , lowercase ):
# expanding a matrix into a one-dimensional list
A_ : Tuple = np.asarray(lowercase )
A_ : List[str] = np.shape(lowercase )
A_ : Dict = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : List[Any] = []
A_ : List[Any] = 0
for i_map in range(lowercase ):
A_ : Optional[int] = np.ones((size_map, size_map) )
for i in range(0 , lowercase , lowercase ):
for j in range(0 , lowercase , lowercase ):
A_ : Union[str, Any] = pd_pool[
i_pool
]
A_ : str = i_pool + 1
A_ : Tuple = np.multiply(
lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowercase )
return pd_all
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=bool ):
# model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowercase )) )
print((""" - - Shape: Teach_Data """, np.shape(lowercase )) )
A_ : int = 0
A_ : int = []
A_ : Tuple = 10000
while rp < n_repeat and mse >= error_accuracy:
A_ : Optional[int] = 0
print(F'-------------Learning Time {rp}--------------' )
for p in range(len(lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
A_ : Union[str, Any] = np.asmatrix(datas_train[p] )
A_ : Tuple = np.asarray(datas_teach[p] )
A_, A_ : int = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : List[str] = self.pooling(lowercase , self.size_poolinga )
A_ : Any = np.shape(lowercase )
A_ : Optional[Any] = self._expand(lowercase )
A_ : Tuple = data_bp_input
A_ : int = np.dot(lowercase , self.vji.T ) - self.thre_bpa
A_ : List[Any] = self.sig(lowercase )
A_ : Optional[int] = np.dot(lowercase , self.wkj.T ) - self.thre_bpa
A_ : Tuple = self.sig(lowercase )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
A_ : Optional[int] = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase , (1 - bp_outa) ) )
A_ : Union[str, Any] = np.multiply(
np.dot(lowercase , self.wkj ) , np.multiply(lowercase , (1 - bp_outa) ) )
A_ : int = np.dot(lowercase , self.vji )
A_ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
A_ : Dict = pd_conva_pooled.T.getA().tolist()
A_ : Optional[int] = self._calculate_gradient_from_pool(
lowercase , lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
A_ : Tuple = self.rate_weight * np.dot(lowercase , lowercase )
A_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A_ : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A_ : Dict = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A_ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A_ : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
A_ : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
A_ : Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A_ : List[str] = rp + 1
A_ : Optional[Any] = error_count / patterns
all_mse.append(lowercase )
def draw_error():
A_ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowercase , """+-""" )
plt.plot(lowercase , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowercase , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def _a (self , lowercase ):
# model predict
A_ : Optional[int] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowercase )) )
for p in range(len(lowercase ) ):
A_ : Optional[Any] = np.asmatrix(datas_test[p] )
A_, A_ : Any = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : Any = self.pooling(lowercase , self.size_poolinga )
A_ : Optional[Any] = self._expand(lowercase )
A_ : List[str] = data_bp_input
A_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
A_ : Any = self.sig(lowercase )
A_ : int = bp_outa * self.wkj.T - self.thre_bpa
A_ : Tuple = self.sig(lowercase )
produce_out.extend(bp_outa.getA().tolist() )
A_ : str = [list(map(self.do_round , lowercase ) ) for each in produce_out]
return np.asarray(lowercase )
def _a (self , lowercase ):
# return the image data after the convolution process so we can inspect it
A_ : str = np.asmatrix(lowercase )
A_, A_ : str = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ : List[str] = self.pooling(lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass | 667 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
A_ : Any = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , lowerCamelCase__ )
if matches:
A_ : Optional[Any] = float(matches[1] )
A_ : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
A_ : Optional[Any] = 10_01
A_ : Union[str, Any] = """imagenet-1k-id2label.json"""
A_ : List[str] = """huggingface/label-files"""
A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(lowerCamelCase__ ) + 1: v for k, v in idalabel.items()}
A_ : int = """background"""
A_ : List[str] = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def prepare_img( ):
'''simple docstring'''
A_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def convert_mobilenet_checkpoint( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Optional[Any] = get_mobilenet_va_config(lowerCamelCase__ )
# Load 🤗 model
A_ : Dict = MobileNetVaForImageClassification(lowerCamelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
A_ : Any = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
A_ : int = image_processor(images=prepare_img() , return_tensors="""pt""" )
A_ : List[str] = model(**lowerCamelCase__ )
A_ : Any = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
A_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
A_ : int = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
A_ : Any = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
A_ : Union[str, Any] = """google/""" + model_name
image_processor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_mobilenet_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 667 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
) | 667 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( ProcessorMixin ):
__SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']
__SCREAMING_SNAKE_CASE : Tuple = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__(self , lowercase , lowercase=None ):
super().__init__(lowercase )
A_ : Any = speaker_embeddings
@classmethod
def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
A_ : Any = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowercase , lowercase )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A_ : str = None
else:
with open(lowercase ) as speaker_embeddings_json:
A_ : List[str] = json.load(lowercase )
else:
A_ : str = None
A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
A_ : Optional[int] = {}
A_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ : Union[str, Any] = self._load_voice_preset(lowercase )
A_ : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
A_ : str = tmp_dict
with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def _a (self , lowercase = None , **lowercase ):
A_ : List[Any] = self.speaker_embeddings[voice_preset]
A_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A_ : int = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.' )
A_ : Tuple = np.load(lowercase )
return voice_preset_dict
def _a (self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ : Optional[int] = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
A_ : Optional[int] = voice_preset + """.npz"""
A_ : Any = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
A_ : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
A_ : Union[str, Any] = voice_preset
return encoded_text | 667 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''nielsr/canine-s''': 2_0_4_8,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase ( PreTrainedTokenizer ):
__SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self , lowercase=chr(CLS ) , lowercase=chr(SEP ) , lowercase=chr(SEP ) , lowercase=chr(CLS ) , lowercase=chr(PAD ) , lowercase=chr(MASK ) , lowercase=False , lowercase=2048 , **lowercase , ):  # defaults: bos/cls -> CLS, eos/sep -> SEP, pad -> PAD, mask -> MASK
A_ : str = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else bos_token
A_ : int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else eos_token
A_ : Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else sep_token
A_ : Optional[int] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else cls_token
A_ : Union[str, Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , model_max_length=lowercase , **lowercase , )
# Creates a mapping for looking up the IDs of special symbols.
A_ : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
A_ : Tuple = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
A_ : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
A_ : str = UNICODE_VOCAB_SIZE
A_ : Tuple = len(self._special_codepoints )
@property
def _a (self ):
return self._unicode_vocab_size
def _a (self , lowercase ):
return list(lowercase )
def _a (self , lowercase ):
try:
return ord(lowercase )
except TypeError:
raise ValueError(F'invalid token: \'{token}\'' )
def _a (self , lowercase ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(lowercase )
except TypeError:
raise ValueError(F'invalid id: {index}' )
def _a (self , lowercase ):
return "".join(lowercase )
def _a (self , lowercase , lowercase = None ):
A_ : Any = [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : List[Any] = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def _a (self , lowercase , lowercase = None , lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
A_ : str = [1] + ([0] * len(lowercase )) + [1]
if token_ids_a is not None:
result += ([0] * len(lowercase )) + [1]
return result
def _a (self , lowercase , lowercase = None ):
A_ : str = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
A_ : Any = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def _a (self , lowercase , lowercase = None ):
return () | 667 |
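# What the tokenizer above boils down to, as a standalone sketch: tokens are
# single characters and ids are their Unicode codepoints (plus the private-use
# specials defined above).
text = "héllo"
ids = [ord(ch) for ch in text]               # _convert_token_to_id
assert "".join(chr(i) for i in ids) == text  # round trip via _convert_id_to_token
print(ids)  # [104, 233, 108, 108, 111]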
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    '''simple docstring'''
    if isinstance(padding_value , tuple ):
        # pair-valued padding (e.g. entity spans) -> shape (batch, seq_len, 2)
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        tensor = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(tensor )] = tensor
        else:
            out_tensor[i, -len(tensor ) :] = tensor
    return out_tensor.tolist()
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = ord(lowerCamelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
A_ : int = unicodedata.category(lowerCamelCase__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase
__SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : int = -100
__SCREAMING_SNAKE_CASE : str = "pt"
def _a (self , lowercase ):
import torch
A_ : str = """label""" if """label""" in features[0].keys() else """labels"""
A_ : str = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
A_ : str = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
A_ : int = torch.tensor(batch["""entity_ids"""] ).shape[1]
A_ : Tuple = self.tokenizer.padding_side
if padding_side == "right":
A_ : Tuple = [
list(lowercase ) + [self.label_pad_token_id] * (sequence_length - len(lowercase )) for label in labels
]
else:
A_ : int = [
[self.label_pad_token_id] * (sequence_length - len(lowercase )) + list(lowercase ) for label in labels
]
A_ : Dict = [feature["""ner_tags"""] for feature in features]
A_ : Union[str, Any] = padding_tensor(lowercase , -1 , lowercase , lowercase )
A_ : Tuple = [feature["""original_entity_spans"""] for feature in features]
A_ : Optional[int] = padding_tensor(lowercase , (-1, -1) , lowercase , lowercase )
A_ : Any = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
return batch | 667 |
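# A runnable sketch of the padding helper defined at the top of this snippet,
# reconstructed from its call sites (it is invoked as
# padding_tensor(features, pad_value, padding_side, sequence_length));
# the parameter names below are assumptions, not the original identifiers.
import numpy as np

def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # span tuples pad to shape (batch, seq_len, 2); scalars to (batch, seq_len)
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        length = min(len(tensor), sequence_length)
        if length == 0:
            continue
        if padding_side == "right":
            out_tensor[i, :length] = tensor[:length]
        else:
            out_tensor[i, -length:] = tensor[:length]
    return out_tensor.tolist()

# padding_tensor([[1, 2], [3]], -1, "right", 4) -> [[1, 2, -1, -1], [3, -1, -1, -1]]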
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = image_size
A_ : List[str] = patch_size
A_ : Tuple = num_channels
A_ : List[str] = max_token_length
A_ : int = num_character_labels
A_ : str = num_bpe_labels
A_ : Tuple = num_wordpiece_labels
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = mlp_ratio
A_ : str = distilled
A_ : Union[str, Any] = layer_norm_eps
A_ : str = drop_rate
A_ : int = qkv_bias
A_ : Dict = attn_drop_rate
A_ : List[Any] = drop_path_rate
A_ : Any = output_aa_attentions
A_ : Union[str, Any] = initializer_range | 667 | 1 |
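# Usage sketch for the config class above: its model_type is "mgp-str", which
# corresponds to MgpstrConfig upstream in transformers. The import below
# assumes that mapping and a transformers version that ships the model.
from transformers import MgpstrConfig

config = MgpstrConfig(image_size=[32, 128], max_token_length=27)
print(config.model_type)         # "mgp-str"
print(config.num_hidden_layers)  # 12 by default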
'''simple docstring'''
from collections import deque
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = len(lowerCamelCase__ )
A_ : int = deque()
A_ : Dict = [False for _ in range(lowerCamelCase__ )]
A_ : Dict = [-1 for _ in range(lowerCamelCase__ )]
A_ : List[str] = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = index # the number when this node is seen
A_ : str = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
A_ : Dict = True
for w in g[v]:
if index_of[w] == -1:
A_ : int = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A_ : Tuple = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
A_ : str = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
A_ : Optional[Any] = []
A_ : Optional[Any] = stack.pop()
A_ : Tuple = False
component.append(lowerCamelCase__ )
while w != v:
A_ : Optional[int] = stack.pop()
A_ : Optional[int] = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
A_ : Union[str, Any] = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
lowerCamelCase :Optional[int] = 7
lowerCamelCase :int = [0, 0, 1, 2, 3, 3, 4, 4, 6]
lowerCamelCase :Any = [1, 3, 2, 0, 1, 4, 5, 6, 5]
lowerCamelCase :Any = [(u, v) for u, v in zip(source, target)]
lowerCamelCase :Union[str, Any] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) | 667 |
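# A second worked example for the Tarjan routine above, assuming the same
# bindings the __main__ block uses (tarjan / create_graph): SCCs come out in
# reverse topological order of the condensation, so sink components are first.
cycle_edges = [(0, 1), (1, 2), (2, 0), (2, 3)]
assert tarjan(create_graph(4, cycle_edges)) == [[3], [2, 1, 0]]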
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''simple docstring'''
    # secant method: keep the last two iterates distinct so the
    # finite-difference slope below never divides by zero
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""" )
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    '''simple docstring'''
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 667 | 1 |
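# A tolerance-parameterized variant of the same secant iteration, for
# reference; tol and max_iter are additions, not part of the original.
def secant(function, x0, x1, tol=1e-5, max_iter=100):
    for _ in range(max_iter):
        f0, f1 = function(x0), function(x1)
        if x0 == x1 or f0 == f1:
            raise ZeroDivisionError("flat secant, could not find root")
        x2 = x1 - f1 * (x1 - x0) / (f1 - f0)  # same update, rearranged
        if abs(x2 - x1) < tol:
            return x2
        x0, x1 = x1, x2
    raise ArithmeticError("secant method did not converge")

# secant(f, 3, 3.5) ~= 2.0945514, the real root of x**3 - 2*x - 5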
'''simple docstring'''
import os
lowerCamelCase :Dict = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = 0
A_ : Any = 0
while index < len(lowerCamelCase__ ) - 1:
A_ : Any = SYMBOLS[numerals[index]]
A_ : Any = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : str = """"""
A_ : Optional[int] = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ : Tuple = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ : Any = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a ( lowerCamelCase__ = "/p089_roman.txt" ):
'''simple docstring'''
A_ : Any = 0
with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
A_ : int = filea.readlines()
for line in lines:
A_ : List[str] = line.strip()
A_ : Optional[int] = parse_roman_numerals(lowerCamelCase__ )
A_ : Optional[int] = generate_roman_numerals(lowerCamelCase__ )
savings += len(lowerCamelCase__ ) - len(lowerCamelCase__ )
return savings
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Dict = size if size is not None else {"""shortest_edge""": 224}
A_ : List[str] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name="""crop_size""" )
A_ : str = do_resize
A_ : str = size
A_ : List[str] = resample
A_ : Any = do_center_crop
A_ : Union[str, Any] = crop_size
A_ : List[Any] = do_rescale
A_ : List[Any] = rescale_factor
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Union[str, Any] = do_convert_rgb
def _a (self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : List[str] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(lowercase , param_name="""size""" , default_to_square=lowercase )
A_ : int = resample if resample is not None else self.resample
A_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Dict = get_size_dict(lowercase , param_name="""crop_size""" , default_to_square=lowercase )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : Any = image_mean if image_mean is not None else self.image_mean
A_ : Any = image_std if image_std is not None else self.image_std
A_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : List[str] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : int = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : int = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 667 | 1 |
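# Usage sketch for the preprocessing class above (convert to RGB, resize the
# shortest edge to 224, center-crop 224x224, rescale by 1/255, normalize with
# the OpenAI CLIP statistics). CLIPImageProcessor is assumed to be the
# upstream equivalent of the class defined here.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224})
image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)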
'''simple docstring'''
lowerCamelCase :Optional[Any] = 8.314_462 # Unit - J mol-1 K-1
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod() | 667 |
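# Quick numeric check of the ideal-gas relations above (PV = nRT with
# R = 8.314_462 J mol^-1 K^-1): 1 mol at 300 K in 0.02 m^3.
n, T, V = 1.0, 300.0, 0.02
P = n * T * 8.314_462 / V                      # pressure: ~124716.93 Pa
assert abs(n * T * 8.314_462 / P - V) < 1e-12  # inverting recovers the volume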
'''simple docstring'''
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : List[str] = name
A_ : Dict = value
A_ : Optional[int] = weight
def __repr__(self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _a (self ):
return self.value
def _a (self ):
return self.name
def _a (self ):
return self.weight
def _a (self ):
return self.value / self.weight
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = []
for i in range(len(lowerCamelCase__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ )
A_ : Any = []
A_, A_ = 0.0, 0.0
for i in range(len(lowerCamelCase__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
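# A compact restatement of the greedy selection implemented above (sort by a
# key such as the value, then pack items while they still fit); the item data
# below is illustrative.
items = [("pizza", 100, 10), ("burger", 80, 40), ("cola", 60, 20)]  # (name, value, weight)
budget, total_value, chosen = 50, 0.0, []
for name, value, weight in sorted(items, key=lambda t: t[1], reverse=True):
    if weight <= budget:
        chosen.append(name)
        budget -= weight
        total_value += value
assert (chosen, total_value) == (["pizza", "burger"], 180.0)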
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : torch.FloatTensor
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
@register_to_config
def __init__(self , lowercase = 32 , lowercase = 64 , lowercase = 20 , lowercase = 768 , lowercase=77 , lowercase=4 , lowercase = 0.0 , lowercase = "silu" , lowercase = None , lowercase = None , lowercase = "linear" , lowercase = "prd" , lowercase = None , lowercase = None , lowercase = None , ):
super().__init__()
A_ : int = num_attention_heads
A_ : Union[str, Any] = attention_head_dim
A_ : int = num_attention_heads * attention_head_dim
A_ : List[Any] = additional_embeddings
A_ : List[str] = time_embed_dim or inner_dim
A_ : Any = embedding_proj_dim or embedding_dim
A_ : Dict = clip_embed_dim or embedding_dim
A_ : Optional[int] = Timesteps(lowercase , lowercase , 0 )
A_ : List[Any] = TimestepEmbedding(lowercase , lowercase , out_dim=lowercase , act_fn=lowercase )
A_ : List[Any] = nn.Linear(lowercase , lowercase )
if embedding_proj_norm_type is None:
A_ : Optional[int] = None
elif embedding_proj_norm_type == "layer":
A_ : int = nn.LayerNorm(lowercase )
else:
raise ValueError(F'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
A_ : Optional[int] = nn.Linear(lowercase , lowercase )
if encoder_hid_proj_type is None:
A_ : Optional[Any] = None
elif encoder_hid_proj_type == "linear":
A_ : List[Any] = nn.Linear(lowercase , lowercase )
else:
raise ValueError(F'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
A_ : Optional[Any] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowercase ) )
if added_emb_type == "prd":
A_ : int = nn.Parameter(torch.zeros(1 , 1 , lowercase ) )
elif added_emb_type is None:
A_ : int = None
else:
raise ValueError(
F'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
A_ : Tuple = nn.ModuleList(
[
BasicTransformerBlock(
lowercase , lowercase , lowercase , dropout=lowercase , activation_fn="""gelu""" , attention_bias=lowercase , )
for d in range(lowercase )
] )
if norm_in_type == "layer":
A_ : List[Any] = nn.LayerNorm(lowercase )
elif norm_in_type is None:
A_ : Dict = None
else:
raise ValueError(F'Unsupported norm_in_type: {norm_in_type}.' )
A_ : str = nn.LayerNorm(lowercase )
A_ : List[Any] = nn.Linear(lowercase , lowercase )
A_ : Dict = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
A_ : Union[str, Any] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , lowercase , persistent=lowercase )
A_ : Dict = nn.Parameter(torch.zeros(1 , lowercase ) )
A_ : List[str] = nn.Parameter(torch.zeros(1 , lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _a (self ):
A_ : Tuple = {}
def fn_recursive_add_processors(lowercase , lowercase , lowercase ):
if hasattr(lowercase , """set_processor""" ):
A_ : Optional[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , lowercase , lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowercase , lowercase , lowercase )
return processors
def _a (self , lowercase ):
A_ : str = len(self.attn_processors.keys() )
if isinstance(lowercase , lowercase ) and len(lowercase ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(lowercase )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(lowercase , lowercase , lowercase ):
if hasattr(lowercase , """set_processor""" ):
if not isinstance(lowercase , lowercase ):
module.set_processor(lowercase )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , lowercase , lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowercase , lowercase , lowercase )
def _a (self ):
self.set_attn_processor(AttnProcessor() )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = True , ):
A_ : Optional[int] = hidden_states.shape[0]
A_ : Dict = timestep
if not torch.is_tensor(lowercase ):
A_ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowercase ) and len(timesteps.shape ) == 0:
A_ : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : Tuple = timesteps * torch.ones(lowercase , dtype=timesteps.dtype , device=timesteps.device )
A_ : Any = self.time_proj(lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
A_ : List[Any] = timesteps_projected.to(dtype=self.dtype )
A_ : Optional[Any] = self.time_embedding(lowercase )
if self.embedding_proj_norm is not None:
A_ : Dict = self.embedding_proj_norm(lowercase )
A_ : Any = self.embedding_proj(lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
A_ : Any = self.encoder_hidden_states_proj(lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
A_ : List[str] = self.proj_in(lowercase )
A_ : List[Any] = self.positional_embedding.to(hidden_states.dtype )
A_ : Optional[Any] = []
A_ : Union[str, Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
A_ : Optional[int] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
A_ : List[Any] = hidden_states[:, None, :]
A_ : Optional[int] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
A_ : str = self.prd_embedding.to(hidden_states.dtype ).expand(lowercase , -1 , -1 )
additional_embeds.append(lowercase )
A_ : Any = torch.cat(
lowercase , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
A_ : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
A_ : Union[str, Any] = F.pad(
lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
A_ : List[str] = hidden_states + positional_embeddings
if attention_mask is not None:
A_ : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
A_ : Union[str, Any] = F.pad(lowercase , (0, self.additional_embeddings) , value=0.0 )
A_ : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
A_ : Union[str, Any] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
A_ : List[str] = self.norm_in(lowercase )
for block in self.transformer_blocks:
A_ : Dict = block(lowercase , attention_mask=lowercase )
A_ : Optional[int] = self.norm_out(lowercase )
if self.prd_embedding is not None:
A_ : Dict = hidden_states[:, -1]
else:
A_ : Optional[Any] = hidden_states[:, additional_embeddings_len:]
A_ : Optional[Any] = self.proj_to_clip_embeddings(lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowercase )
def _a (self , lowercase ):
A_ : Any = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents | 667 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCamelCase :int = logging.getLogger(__name__)
lowerCamelCase :List[Any] = 5_0 # max width of layer names
lowerCamelCase :List[Any] = 7_0 # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 1 |
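# Typical wiring of the helpers above into a quantization-aware training
# script (a sketch; `add_arguments`, `set_default_quantizers` and
# `configure_model` are assumed names for the first three functions here,
# and `model` is an already-built quantized network):
# import argparse
# parser = argparse.ArgumentParser()
# add_arguments(parser)                      # registers --aprec/--wprec/...
# args = parser.parse_args(["--quant-per-tensor"])
# set_default_quantizers(args)               # installs the QuantDescriptors
# configure_model(model, args, calib=False)  # disables/fuses/clips quantizers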
'''simple docstring'''
import math
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = len(lowerCamelCase__ )
A_ : Any = int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
A_ : Optional[int] = 0
while arr[min(lowerCamelCase__ , lowerCamelCase__ ) - 1] < x:
A_ : List[Any] = step
step += int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
A_ : int = prev + 1
if prev == min(lowerCamelCase__ , lowerCamelCase__ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase :List[str] = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase :int = [int(item) for item in user_input.split(''',''')]
lowerCamelCase :Union[str, Any] = int(input('''Enter the number to be searched:\n'''))
lowerCamelCase :Optional[Any] = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F"Number {x} is at index {res}") | 667 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
# Can register in two steps
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# We pass through a fast BERT tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase ):
A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def _a (self ):
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Dict = False
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = NewTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = False
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
# If remote code is not set, the default is to use local
A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _a (self ):
A_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def _a (self ):
with self.assertRaisesRegex(
lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
def _a (self ):
# Make sure we have cached the tokenizer.
A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 667 | 1 |
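# The custom-class registration pattern these tests exercise, in isolation
# (a sketch; CustomConfig/CustomTokenizer/CustomTokenizerFast come from the
# test fixtures imported above, and saved_dir is a directory produced by
# tokenizer.save_pretrained):
# AutoConfig.register("custom", CustomConfig)
# AutoTokenizer.register(
#     CustomConfig,
#     slow_tokenizer_class=CustomTokenizer,
#     fast_tokenizer_class=CustomTokenizerFast,
# )
# tok = AutoTokenizer.from_pretrained(saved_dir)  # resolves to the custom class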
'''simple docstring'''
import operator as op
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
A_ : Dict = lambda x , y : int(x / y ) # noqa: E731 integer division operation
A_ : str = {
"""^""": op.pow,
"""*""": op.mul,
"""/""": div,
"""+""": op.add,
"""-""": op.sub,
} # operators & their respective operation
# print table header
print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
print("""-""" * (30 + len(lowerCamelCase__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(lowerCamelCase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(lowerCamelCase__ ) , sep=""" | """ )
else:
A_ : Tuple = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(lowerCamelCase__ ) , sep=""" | """ )
A_ : List[Any] = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(lowerCamelCase__ ) , sep=""" | """ )
stack.append(
str(opr[x](int(lowerCamelCase__ ) , int(lowerCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(lowerCamelCase__ ) , sep=""" | """ , )
return int(stack[0] )
if __name__ == "__main__":
lowerCamelCase :List[str] = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix)) | 667 |
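# Worked run of the evaluator above, assuming the binding the __main__ block
# uses (solve); it also prints the step table as a side effect:
# "5 6 9 * +" evaluates to 5 + (6 * 9) = 59.
assert solve("5 6 9 * +".split(" ")) == 59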
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
A_ : int = number_of_bytes // partitions
A_ : Union[str, Any] = []
for i in range(lowerCamelCase__ ):
A_ : Dict = i * bytes_per_partition + 1
A_ : Tuple = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
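# Worked examples for the partitioner above (called by its name in this
# snippet, `a`): equal blocks, with the last partition absorbing the remainder.
assert a(16, 4) == ["1-4", "5-8", "9-12", "13-16"]
assert a(10, 3) == ["1-3", "4-6", "7-10"]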
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = VQModel
__SCREAMING_SNAKE_CASE : Any = 'sample'
@property
def _a (self , lowercase=(32, 32) ):
A_ : List[str] = 4
A_ : Any = 3
A_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase )
return {"sample": image}
@property
def _a (self ):
return (3, 32, 32)
@property
def _a (self ):
return (3, 32, 32)
def _a (self ):
A_ : Dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
A_ : Tuple = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
pass
def _a (self ):
pass
def _a (self ):
A_, A_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowercase )
A_ : Optional[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _a (self ):
A_ : Tuple = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(lowercase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
A_ : str = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
A_ : str = image.to(lowercase )
with torch.no_grad():
A_ : Optional[Any] = model(lowercase ).sample
A_ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A_ : str = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) ) | 667 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Any = logging.get_logger(__name__)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : Optional[int] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
A_ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = dct.pop(lowerCamelCase__ )
A_ : Optional[int] = val
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
A_ : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Tuple = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
A_ : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : List[str] = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Union[str, Any] = 10_24
A_ : List[Any] = 40_96
A_ : Dict = 24
A_ : List[str] = 16
A_ : Union[str, Any] = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Optional[Any] = False
A_ : Union[str, Any] = """relu"""
A_ : List[str] = 10_24
A_ : Tuple = True
A_ : Tuple = False
A_ : List[str] = False
# load HuggingFace model
A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ : str = val
else:
A_ : List[str] = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
A_ : str = ViTImageProcessor(size=encoder_config.image_size )
A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
A_ : Dict = outputs.logits
A_ : str = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : Any = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ : List[Any] = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 667 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase :int = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCamelCase :Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def a ( ):
'''simple docstring'''
A_ : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
A_ : List[Any] = bs[:]
A_ : List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
A_ : Any = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = set()
A_ : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A_ : Dict = char
return pairs
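Illustrative aside: the intended behavior of get_pairs, written with explicit names (the bindings above are anonymized).
word = ("l", "o", "w")
pairs, prev_char = set(), word[0]
for char in word[1:]:
    pairs.add((prev_char, char))
    prev_char = char
assert pairs == {("l", "o"), ("o", "w")}  # every adjacent symbol pair, order-insensitive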
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
def __init__(self , lowercase , lowercase , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , **lowercase , ):
A_ : Union[str, Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else bos_token
A_ : Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else eos_token
A_ : Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else sep_token
A_ : Optional[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else cls_token
A_ : List[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else unk_token
A_ : str = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
A_ : Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
errors=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , **lowercase , )
with open(lowercase , encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(lowercase )
A_ : Tuple = {v: k for k, v in self.encoder.items()}
A_ : Tuple = errors # how to handle errors in decoding
A_ : Any = bytes_to_unicode()
A_ : Any = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase , encoding="""utf-8""" ) as merges_handle:
A_ : Tuple = merges_handle.read().split("""\n""" )[1:-1]
A_ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
A_ : Any = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : Any = {}
A_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Dict = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _a (self ):
return len(self.encoder )
def _a (self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _a (self , lowercase ):
if token in self.cache:
return self.cache[token]
A_ : str = tuple(lowercase )
A_ : Optional[Any] = get_pairs(lowercase )
if not pairs:
return token
while True:
A_ : Union[str, Any] = min(lowercase , key=lambda lowercase : self.bpe_ranks.get(lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_, A_ : str = bigram
A_ : str = []
A_ : Optional[Any] = 0
while i < len(lowercase ):
try:
A_ : Optional[int] = word.index(lowercase , lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : List[str] = j
if word[i] == first and i < len(lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : Tuple = tuple(lowercase )
A_ : Optional[Any] = new_word
if len(lowercase ) == 1:
break
else:
A_ : int = get_pairs(lowercase )
A_ : str = """ """.join(lowercase )
A_ : Optional[Any] = word
return word
def _a (self , lowercase ):
A_ : List[Any] = []
for token in re.findall(self.pat , lowercase ):
A_ : Union[str, Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase ).split(""" """ ) )
return bpe_tokens
def _a (self , lowercase ):
return self.encoder.get(lowercase , self.encoder.get(self.unk_token ) )
def _a (self , lowercase ):
return self.decoder.get(lowercase )
def _a (self , lowercase ):
A_ : Union[str, Any] = """""".join(lowercase )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _a (self , lowercase , lowercase = None ):
if not os.path.isdir(lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : List[Any] = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : str = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase , ensure_ascii=lowercase ) + """\n""" )
A_ : Tuple = 0
with open(lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Tuple = token_index
writer.write(""" """.join(lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a (self , lowercase , lowercase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
A_ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a (self , lowercase , lowercase = None , lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
def _a (self , lowercase , lowercase = None ):
A_ : Dict = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a (self , lowercase , lowercase=False , **lowercase ):
A_ : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowercase ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
def _a (self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ):
A_ : Optional[Any] = super()._pad(
encoded_inputs=lowercase , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
# Load from model defaults
if return_attention_mask is None:
A_ : int = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ : Tuple = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
A_ : List[str] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowercase )
if needs_to_be_padded:
A_ : str = len(lowercase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ : int = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
A_ : Optional[int] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 667 |
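Illustrative aside: a minimal sketch of the global_attention_mask padding rule in the padding override above, using plain lists (-1 marks padded positions, 0 local attention, 1 global attention).
mask, difference = [1, 0, 0], 2
right_padded = mask + [-1] * difference  # padding_side == "right"
left_padded = [-1] * difference + mask   # padding_side == "left"
assert right_padded == [1, 0, 0, -1, -1]
assert left_padded == [-1, -1, 1, 0, 0]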
'''simple docstring'''
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 667 | 1 |
'''simple docstring'''
import cmath
import math
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = math.radians(lowerCamelCase__ )
A_ : List[str] = math.radians(lowerCamelCase__ )
# Convert voltage and current to rectangular form
A_ : Optional[Any] = cmath.rect(lowerCamelCase__ , lowerCamelCase__ )
A_ : Tuple = cmath.rect(lowerCamelCase__ , lowerCamelCase__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
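Illustrative aside: a hand-checked call of the apparent-power arithmetic above; 100 V and 5 A, both at phase 0 degrees, give a purely real 500 VA.
import cmath
import math

voltage_rect = cmath.rect(100, math.radians(0))  # 100 + 0j
current_rect = cmath.rect(5, math.radians(0))    # 5 + 0j
assert voltage_rect * current_rect == 500 + 0j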
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def a ( ):
'''simple docstring'''
A_ : List[Any] = cn.convert_to_negative(lowerCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def a ( ):
'''simple docstring'''
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowerCamelCase__ , 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def a ( ):
'''simple docstring'''
A_ : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def a ( ):
'''simple docstring'''
A_ : int = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ : List[Any] = canny.canny(lowerCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def a ( ):
'''simple docstring'''
assert gg.gaussian_filter(lowerCamelCase__ , 5 , sigma=0.9 ).all()
def a ( ):
'''simple docstring'''
A_ : int = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ : Optional[Any] = conv.img_convolve(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
assert res.any()
def a ( ):
'''simple docstring'''
assert med.median_filter(lowerCamelCase__ , 3 ).any()
def a ( ):
'''simple docstring'''
A_, A_ : int = sob.sobel_filter(lowerCamelCase__ )
assert grad.any() and theta.any()
def a ( ):
'''simple docstring'''
A_ : int = sp.make_sepia(lowerCamelCase__ , 20 )
assert sepia.all()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A_ : Any = bs.Burkes(imread(lowerCamelCase__ , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def a ( lowerCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A_ : Union[str, Any] = rs.NearestNeighbour(imread(lowerCamelCase__ , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def a ( ):
'''simple docstring'''
A_ : int = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ : Union[str, Any] = imread(lowerCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A_ : str = 0
A_ : str = 0
A_ : Dict = image[x_coordinate][y_coordinate]
A_ : Optional[Any] = lbp.get_neighbors_pixel(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A_ : Any = lbp.local_binary_value(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert lbp_image.any() | 667 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase ( __UpperCAmelCase ):
@staticmethod
@abstractmethod
def _a (lowercase ):
raise NotImplementedError()
@abstractmethod
def _a (self ):
raise NotImplementedError() | 667 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowerCamelCase :Dict = get_logger(__name__)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = []
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Union[str, Any] = obj
A_ : Optional[int] = target
A_ : Optional[Any] = new
A_ : Optional[Any] = target.split(""".""" )[0]
A_ : Tuple = {}
A_ : Optional[int] = attrs or []
def __enter__(self ):
*A_, A_ : Optional[Any] = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
A_ : Any = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ : int = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ : str = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
A_ : Optional[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
A_ : Dict = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ : Optional[Any] = getattr(import_module(""".""".join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
A_ : Dict = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ : int = globals()["""__builtins__"""][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__(self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def _a (self ):
self.__enter__()
self._active_patches.append(self )
def _a (self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 667 | 1 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCamelCase :Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = list(s_dict.keys() )
for key in keys:
A_ : Any = r""".*/layers_(\d+)"""
A_ : List[str] = key
if re.match(lowerCamelCase__ , lowerCamelCase__ ):
A_ : List[Any] = re.sub(r"""layers_(\d+)""" , r"""block/\1/layer""" , lowerCamelCase__ )
A_ : str = r"""(encoder|decoder)\/"""
if re.match(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Any = re.match(lowerCamelCase__ , lowerCamelCase__ ).groups()
if groups[0] == "encoder":
A_ : List[Any] = re.sub(r"""/mlp/""" , r"""/1/mlp/""" , lowerCamelCase__ )
A_ : Union[str, Any] = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/1/layer_norm/""" , lowerCamelCase__ )
elif groups[0] == "decoder":
A_ : str = re.sub(r"""/mlp/""" , r"""/2/mlp/""" , lowerCamelCase__ )
A_ : Dict = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/2/layer_norm/""" , lowerCamelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ : List[str] = new_key.replace(lowerCamelCase__ , lowerCamelCase__ )
print(f'{key} -> {new_key}' )
A_ : List[str] = s_dict.pop(lowerCamelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ : Optional[Any] = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ : Tuple = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ : str = s_dict[key].shape[0]
A_ : int = s_dict[key]
for idx in range(lowerCamelCase__ ):
A_ : str = expert_weihts[idx]
print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )  # reconstructed nested f-string; target key format assumed from the experts layout
s_dict.pop(lowerCamelCase__ )
return s_dict
lowerCamelCase :Any = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
import regex as re
with open(lowerCamelCase__ , """r""" ) as f:
A_ : Tuple = f.read()
A_ : Optional[int] = re.findall(r"""(.*) = ([0-9.]*)""" , lowerCamelCase__ )
A_ : int = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ : Union[str, Any] = float(lowerCamelCase__ ) if """.""" in value else int(lowerCamelCase__ )
A_ : Any = re.findall(r"""(.*activations) = \(\'(.*)\',\)""" , lowerCamelCase__ )[0]
A_ : Optional[int] = str(activation[1] )
A_ : List[str] = num_experts
A_ : List[str] = SwitchTransformersConfig(**lowerCamelCase__ )
return config
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__="./" , lowerCamelCase__=8 ):
'''simple docstring'''
print(f'Loading flax weights from : {flax_checkpoint_path}' )
A_ : Optional[int] = checkpoints.load_tax_checkpoint(lowerCamelCase__ )
if gin_file is not None:
A_ : Any = convert_gin_to_config(lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : Union[str, Any] = SwitchTransformersConfig.from_pretrained(lowerCamelCase__ )
A_ : List[str] = SwitchTransformersForConditionalGeneration(lowerCamelCase__ )
A_ : Dict = flax_params["""target"""]
A_ : Optional[Any] = flatten_dict(lowerCamelCase__ , sep="""/""" )
A_ : List[str] = rename_keys(lowerCamelCase__ )
A_ : List[str] = unflatten_dict(lowerCamelCase__ , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCamelCase__ , lowerCamelCase__ )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
lowerCamelCase :Dict = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
) | 667 |
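Illustrative aside: the layers_(\d+) rewrite used by rename_keys above can be sanity-checked in isolation.
import re

old_key = "encoder/layers_0/attention/query/kernel"
new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", old_key)
assert new_key == "encoder/block/0/layer/attention/query/kernel"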
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase :int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 1 |
'''simple docstring'''
import requests
lowerCamelCase :List[Any] = '''''' # <-- Put your OpenWeatherMap appid here!
lowerCamelCase :Union[str, Any] = '''https://api.openweathermap.org/data/2.5/'''
def a ( lowerCamelCase__ = "Chicago" , lowerCamelCase__ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def a ( lowerCamelCase__ = "Kolkata, India" , lowerCamelCase__ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def a ( lowerCamelCase__ = 55.68 , lowerCamelCase__ = 12.57 , lowerCamelCase__ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
lowerCamelCase :List[Any] = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break | 667 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase , lowercase , lowercase = None , ):
super().__init__()
self.register_modules(transformer=lowercase , vae=lowercase , scheduler=lowercase )
# create an imagenet -> id dictionary for easier use
A_ : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ : Optional[Any] = int(lowercase )
A_ : List[Any] = dict(sorted(self.labels.items() ) )
def _a (self , lowercase ):
if not isinstance(lowercase , lowercase ):
A_ : Optional[int] = list(lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__(self , lowercase , lowercase = 4.0 , lowercase = None , lowercase = 50 , lowercase = "pil" , lowercase = True , ):
A_ : Tuple = len(lowercase )
A_ : Optional[Any] = self.transformer.config.sample_size
A_ : int = self.transformer.config.in_channels
A_ : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
A_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ : Optional[int] = torch.tensor(lowercase , device=self.device ).reshape(-1 )
A_ : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
A_ : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ : List[Any] = latent_model_input[: len(lowercase ) // 2]
A_ : List[str] = torch.cat([half, half] , dim=0 )
A_ : Any = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Tuple = t
if not torch.is_tensor(lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ : Optional[Any] = latent_model_input.device.type == """mps"""
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
A_ : List[Any] = torch.intaa if is_mps else torch.intaa
A_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ : List[Any] = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
A_, A_ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_, A_ : List[Any] = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
A_ : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ : str = torch.cat([half_eps, half_eps] , dim=0 )
A_ : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_, A_ : int = torch.split(lowercase , lowercase , dim=1 )
else:
A_ : Optional[int] = noise_pred
# compute previous image: x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
A_, A_ : int = latent_model_input.chunk(2 , dim=0 )
else:
A_ : Union[str, Any] = latent_model_input
A_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
A_ : List[Any] = self.vae.decode(lowercase ).sample
A_ : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : int = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 667 | 1 |
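Illustrative aside: the guidance step in the loop above is standard classifier-free guidance; a minimal tensor sketch of the combination it computes.
import torch

cond_eps = torch.tensor([1.0, 2.0])
uncond_eps = torch.tensor([0.5, 1.0])
guidance_scale = 4.0
guided_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
assert torch.allclose(guided_eps, torch.tensor([2.5, 5.0]))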
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A_ : Tuple = 1
A_ : Optional[int] = 1
while repunit:
A_ : Optional[int] = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def a ( lowerCamelCase__ = 1_00_00_00 ):
'''simple docstring'''
A_ : Dict = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(lowerCamelCase__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
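Illustrative aside: a hand-verifiable case of the modular recurrence above; the smallest repunit divisible by 7 is R(6) = 111111 = 7 * 15873.
divisor, repunit, repunit_index = 7, 1, 1
while repunit:
    repunit = (10 * repunit + 1) % divisor
    repunit_index += 1
assert repunit_index == 6
assert 111111 % 7 == 0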
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 | 1 |
'''simple docstring'''
import math
lowerCamelCase :int = 1_0
lowerCamelCase :List[Any] = 7
lowerCamelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def a ( lowerCamelCase__ = 20 ):
'''simple docstring'''
A_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
A_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0)) | 667 |
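Illustrative aside: the expectation above follows from linearity; each of the 7 colours is entirely absent from a 20-ball draw with probability C(60, 20) / C(70, 20).
import math

p_missing = math.comb(70 - 10, 20) / math.comb(70, 20)
expected_colours = 7 * (1 - p_missing)
print(f"{expected_colours:.9f}")  # 6.818741802, matching the snippet's output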
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Union[str, Any] = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pix2struct_text_model'
__SCREAMING_SNAKE_CASE : Optional[int] = ['past_key_values']
__SCREAMING_SNAKE_CASE : List[Any] = {
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=50244 , lowercase=768 , lowercase=64 , lowercase=2048 , lowercase=12 , lowercase=12 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="gelu_new" , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=False , lowercase=True , **lowercase , ):
A_ : Tuple = vocab_size
A_ : str = hidden_size
A_ : Optional[Any] = d_kv
A_ : Tuple = d_ff
A_ : str = num_layers
A_ : int = num_heads
A_ : Dict = relative_attention_num_buckets
A_ : Optional[Any] = relative_attention_max_distance
A_ : Dict = dropout_rate
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_factor
A_ : Any = use_cache
A_ : int = eos_token_id
A_ : Tuple = decoder_start_token_id
# for backwards compatibility
A_ : str = dense_act_fn
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , tie_word_embeddings=lowercase , is_decoder=lowercase , **lowercase , )
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[str] = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'pix2struct_vision_model'
def __init__(self , lowercase=768 , lowercase=768 , lowercase=2048 , lowercase=64 , lowercase=12 , lowercase=12 , lowercase="gelu_new" , lowercase=1E-6 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-10 , lowercase=1.0 , lowercase=4096 , lowercase=32 , lowercase=128 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = hidden_size
A_ : Optional[int] = patch_embed_hidden_size
A_ : Any = d_ff
A_ : str = dropout_rate
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[Any] = initializer_range
A_ : List[str] = initializer_factor
A_ : Dict = attention_dropout
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[Any] = dense_act_fn
A_ : List[Any] = seq_len
A_ : Tuple = relative_attention_num_buckets
A_ : Any = relative_attention_max_distance
A_ : int = d_kv
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = 'pix2struct'
__SCREAMING_SNAKE_CASE : List[Any] = True
def __init__(self , lowercase=None , lowercase=None , lowercase=1.0 , lowercase=0.02 , lowercase=False , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(tie_word_embeddings=lowercase , is_encoder_decoder=lowercase , **lowercase )
if text_config is None:
A_ : Optional[Any] = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
A_ : Tuple = PixaStructTextConfig(**lowercase )
A_ : List[str] = PixaStructVisionConfig(**lowercase )
A_ : Dict = self.text_config.decoder_start_token_id
A_ : Union[str, Any] = self.text_config.pad_token_id
A_ : str = self.text_config.eos_token_id
A_ : List[str] = initializer_factor
A_ : int = initializer_range
A_ : Tuple = self.initializer_range
A_ : Tuple = self.initializer_range
A_ : List[str] = is_vqa
@classmethod
def _a (cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def _a (self ):
A_ : Optional[Any] = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.vision_config.to_dict()
A_ : List[str] = self.__class__.model_type
return output | 667 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : torch.FloatTensor
__SCREAMING_SNAKE_CASE : torch.FloatTensor
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 1
@register_to_config
def __init__(self , lowercase = 2000 , lowercase = 0.15 , lowercase = 0.01 , lowercase = 13_48.0 , lowercase = 1E-5 , lowercase = 1 , ):
# standard deviation of the initial noise distribution
A_ : Optional[int] = sigma_max
# setable values
A_ : Dict = None
self.set_sigmas(lowercase , lowercase , lowercase , lowercase )
def _a (self , lowercase , lowercase = None ):
return sample
def _a (self , lowercase , lowercase = None , lowercase = None ):
A_ : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ : List[str] = torch.linspace(1 , lowercase , lowercase , device=lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None ):
A_ : Tuple = sigma_min if sigma_min is not None else self.config.sigma_min
A_ : Union[str, Any] = sigma_max if sigma_max is not None else self.config.sigma_max
A_ : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowercase , lowercase )
A_ : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ : Optional[Any] = torch.exp(torch.linspace(math.log(lowercase ) , math.log(lowercase ) , lowercase ) )
A_ : List[str] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _a (self , lowercase , lowercase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , lowercase = True , ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
A_ : Tuple = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ : List[Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ : List[Any] = timesteps.to(self.discrete_sigmas.device )
A_ : List[Any] = self.discrete_sigmas[timesteps].to(sample.device )
A_ : Dict = self.get_adjacent_sigma(lowercase , lowercase ).to(sample.device )
A_ : str = torch.zeros_like(lowercase )
A_ : Dict = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ : Dict = diffusion.unsqueeze(-1 )
A_ : List[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
A_ : Dict = randn_tensor(
sample.shape , layout=sample.layout , generator=lowercase , device=sample.device , dtype=sample.dtype )
A_ : str = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowercase , prev_sample_mean=lowercase )
def _a (self , lowercase , lowercase , lowercase = None , lowercase = True , ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
A_ : Any = randn_tensor(sample.shape , layout=sample.layout , generator=lowercase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ : Optional[Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ : Optional[int] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ : int = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ : List[Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ : Any = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ : Optional[Any] = step_size.unsqueeze(-1 )
A_ : Dict = sample + step_size * model_output
A_ : Dict = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase )
def _a (self , lowercase , lowercase , lowercase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A_ : str = timesteps.to(original_samples.device )
A_ : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ : str = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowercase ) * sigmas[:, None, None, None]
)
A_ : Tuple = noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps | 667 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase :Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 667 | 1 |
'''simple docstring'''
import sys
lowerCamelCase :Union[str, Any] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def a ( lowerCamelCase__ = N ):
'''simple docstring'''
A_ : Optional[Any] = -sys.maxsize - 1
for i in range(len(lowerCamelCase__ ) - 12 ):
A_ : Dict = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
A_ : int = product
return largest_product
if __name__ == "__main__":
print(F"{solution() = }") | 667 |
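Illustrative aside: a window-of-2 analogue of the 13-digit sliding-window search above, small enough to check by hand.
n = "12345"
best = max(int(n[i]) * int(n[i + 1]) for i in range(len(n) - 1))
assert best == 20  # 4 * 5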
'''simple docstring'''
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 667 | 1 |
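# Editor's illustration (not part of the original file): a minimal usage sketch for
# the processor above; the checkpoint name and input image are assumptions.
#
#   from PIL import Image
#   from transformers import LayoutLMv3Processor
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")  # apply_ocr=True by default
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR supplies the words and boxes
#   # encoding now holds input_ids, bbox, attention_mask and pixel_values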
'''simple docstring'''
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found") | 667 |
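# Editor's illustration (not part of the original file): both entry points agree on
# a small sorted list (lists shorter than `precision` fall through to lin_search).
#
#   >>> ite_ternary_search([1, 3, 5, 7, 9, 11], 7)
#   3
#   >>> rec_ternary_search(0, 5, [1, 3, 5, 7, 9, 11], 7)
#   3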
'''simple docstring'''
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 667 | 1 |
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with Euler's modified method (Heun's method)."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (plain Euler), then trapezoidal corrector
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 667 | 1 |
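# Editor's illustration (not part of the original file): Heun's method applied to
# y' = y with y(0) = 1, whose exact solution is e**x.
#
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   print(y[-1])  # about 2.7141, close to e = 2.71828...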
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 667 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


# We verify the converted weights on an image of two cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    ) | 667 | 1 |
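# Editor's illustration (not part of the original file): an example invocation of
# the script above; the script filename and local checkpoint path are assumptions.
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt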
'''simple docstring'''
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text | 667 | 1 |
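# Editor's illustration (not part of the original file): a hedged usage sketch for
# the processor above, assuming the "suno/bark-small" checkpoint and its bundled
# "v2/en_speaker_6" voice preset.
#
#   from transformers import BarkProcessor
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # inputs holds tokenized text plus a "history_prompt" BatchFeature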
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ] | 667 |
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        ) | 667 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" Mask2Former. Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 667 |
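# Editor's illustration (not part of the original file): instantiating the config
# builds a default Swin backbone config when none is given, and keyword arguments
# override the matching fields.
#
#   config = Mask2FormerConfig(num_queries=200, no_object_weight=0.05)
#   assert config.backbone_config.model_type == "swin"
#   assert config.hidden_size == config.hidden_dim == 256  # via attribute_map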
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 667 | 1 |
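# Editor's illustration (not part of the original file): the defaults above mirror
# the "alibaba-damo/mgp-str-base" checkpoint, so a bare instantiation reproduces it.
#
#   config = MgpstrConfig()
#   print(config.max_token_length, config.num_character_labels)  # 27 38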