| code (string, lengths 82-54.1k) | code_codestyle (int64, 0-699) | style_context (string, lengths 111-35.6k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(_lowercase ) / "preprocessor_config.json"
lowercase__ = Path(_lowercase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(_lowercase ) / "preprocessor_config.json"
lowercase__ = Path(_lowercase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = CLIPConfig()
# Create a dummy config file with image_processor_type
lowercase__ = Path(_lowercase ) / "preprocessor_config.json"
lowercase__ = Path(_lowercase ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase ).to_dict()
config_dict.pop("image_processor_type" )
lowercase__ = CLIPImageProcessor(**_lowercase )
# save in new folder
model_config.save_pretrained(_lowercase )
config.save_pretrained(_lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase )
# make sure private variable is not incorrectly saved
lowercase__ = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(_lowercase ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , "clip-base is not a local folder and is not a valid model identifier" ):
lowercase__ = AutoImageProcessor.from_pretrained("clip-base" )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase , revision="aaaaaa" )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
_lowercase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
lowercase__ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
with self.assertRaises(_lowercase ):
lowercase__ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
lowercase__ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
try:
AutoConfig.register("custom" , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoImageProcessor.register(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(_lowercase ) / "preprocessor_config.json"
lowercase__ = Path(_lowercase ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(_lowercase , "w" ) , )
json.dump({"model_type": "clip"} , open(_lowercase , "w" ) )
lowercase__ = CustomImageProcessor.from_pretrained(_lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = True
try:
AutoConfig.register("custom" , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
lowercase__ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase__ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase__ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(_lowercase , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
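# --- Added illustrative sketch (mine, not part of the original test file) ---
# The registration flow the tests above exercise, in minimal form. It reuses
# CustomConfig / CustomImageProcessor from the test_module package put on
# sys.path earlier and cleans the auto-mappings up afterwards:
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     try:
#         with tempfile.TemporaryDirectory() as tmp:
#             CustomImageProcessor().save_pretrained(tmp)
#             processor = AutoImageProcessor.from_pretrained(tmp)
#             assert isinstance(processor, CustomImageProcessor)
#     finally:
#         del CONFIG_MAPPING._extra_content["custom"]
#         del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]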
| 655 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
    def _test_save_load_local(self):
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
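# --- Added summary sketch (mine, not from the diffusers test suite) ---------
# Both checks above follow the same save/reload-determinism pattern: run the
# pipeline, save_pretrained to a temp dir, from_pretrained it back, rerun with
# identical inputs, and require the max absolute deviation to stay below 1e-4:
#
#     output = pipe(**inputs)[0]
#     with tempfile.TemporaryDirectory() as tmpdir:
#         pipe.save_pretrained(tmpdir)
#         pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
#     output_loaded = pipe_loaded(**inputs)[0]
#     assert np.abs(to_np(output) - to_np(output_loaded)).max() < 1e-4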
| 655 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
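# --- Added usage sketch (mine; paths and checkpoint are hypothetical) -------
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
#     train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     batch = train_dataset[0]  # dict with input_ids / attention_mask / start & end positions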
| 655 |
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
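# --- Added context (mine): the hard-coded ids above encode the sentence in
# the trailing comment; with the matching tokenizer they could be regenerated
# roughly like this (tokenizer checkpoint assumed):
#
#     from transformers import CamembertTokenizer
#     tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#     input_ids = tokenizer("J'aime le camembert !", return_tensors="tf")["input_ids"]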
| 655 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
def __init__( self :str , _lowercase :int , _lowercase :Optional[int]=13 , _lowercase :Union[str, Any]=7 , _lowercase :List[Any]=True , _lowercase :List[str]=True , _lowercase :List[Any]=True , _lowercase :List[Any]=True , _lowercase :Tuple=99 , _lowercase :Union[str, Any]=32 , _lowercase :str=5 , _lowercase :Tuple=4 , _lowercase :str=37 , _lowercase :Tuple="gelu" , _lowercase :Union[str, Any]=0.1 , _lowercase :Dict=0.1 , _lowercase :List[str]=5_12 , _lowercase :str=16 , _lowercase :Tuple=2 , _lowercase :str=0.02 , _lowercase :List[str]=4 , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
    def prepare_config_and_inputs(self):
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=_lowercase )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
lowercase__ = jnp.array([[0, 1, 2, 3, 4, 5]] )
lowercase__ = model(_lowercase )[0]
lowercase__ = 5_00_00
lowercase__ = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
lowercase__ = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1e-4 ) )
| 655 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
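if __name__ == "__main__":
    # Added round-trip check (mine, not in the original module): the encoder
    # and decoder should agree with Python's builtin base64 on arbitrary input.
    import base64

    sample = b"Hello, World!"
    assert base64_encode(sample) == base64.b64encode(sample)
    assert base64_decode(base64_encode(sample)) == sample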
| 655 | 1 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
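# --- Added usage sketch (mine; checkpoint name from the public BARTpho release)
#
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     tokenizer.tokenize("This is a là test")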
| 655 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
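# --- Added usage sketch (mine): this is the class-conditional DiT pipeline
# shipped in diffusers; a typical invocation, with the checkpoint name used in
# the diffusers docs:
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_labels=ids, num_inference_steps=25).images[0]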
| 655 | 1 |
def gray_code_sequence(bit_count: int) -> list[int]:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list[str]:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
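if __name__ == "__main__":
    # Added illustration (mine, not in the original module): adjacent codes in
    # a Gray sequence differ in exactly one bit.
    codes = gray_code_sequence(3)
    print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4]
    for a, b in zip(codes, codes[1:]):
        assert bin(a ^ b).count("1") == 1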
| 655 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
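# --- Added summary sketch (mine) of the resolution order under test: an
# explicitly requested framework wins, then the contents of a local checkpoint,
# then whichever framework is installed (PyTorch preferred when both are):
#
#     FeaturesManager.determine_framework(model_dir, "tf")  # "tf", as requested
#     FeaturesManager.determine_framework(pt_model_dir)     # "pt", inferred from the weights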
| 655 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
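# --- Added usage sketch (mine): composing the two configs and round-tripping
# through to_dict(), mirroring other composite configs in transformers:
#
#     vision = GitVisionConfig(image_size=384)
#     config = GitConfig(vision_config=vision.to_dict())
#     assert config.to_dict()["vision_config"]["image_size"] == 384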
| 655 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_snake_case = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class lowerCAmelCase :
def __init__( self :Optional[Any] , _lowercase :int , _lowercase :Dict=13 , _lowercase :Dict=7 , _lowercase :List[str]=True , _lowercase :List[Any]=False , _lowercase :int=99 , _lowercase :List[Any]=16 , _lowercase :Dict=2 , _lowercase :List[Any]=4 , _lowercase :Optional[int]=4 , _lowercase :int="gelu" , _lowercase :List[str]=0.1 , _lowercase :Union[str, Any]=0.1 , _lowercase :Any=32 , _lowercase :int=2 , _lowercase :List[str]=1 , _lowercase :Dict=0 , _lowercase :str=0.02 , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = eos_token_id
lowercase__ = pad_token_id
lowercase__ = bos_token_id
lowercase__ = initializer_range
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
lowercase__ = shift_tokens_right(_lowercase , 1 , 2 )
lowercase__ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowercase , )
lowercase__ = prepare_blenderbot_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Optional[int] , _lowercase :Optional[int] , _lowercase :List[str] ):
'''simple docstring'''
lowercase__ = 20
lowercase__ = model_class_name(_lowercase )
lowercase__ = model.encode(inputs_dict["input_ids"] )
lowercase__ , lowercase__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowercase__ = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
lowercase__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowercase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase__ = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
lowercase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowercase__ = model.decode(
decoder_input_ids[:, -1:] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowercase , )
lowercase__ = model.decode(_lowercase , _lowercase )
lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Dict , _lowercase :Optional[int] , _lowercase :Union[str, Any] ):
'''simple docstring'''
lowercase__ = 20
lowercase__ = model_class_name(_lowercase )
lowercase__ = model.encode(inputs_dict["input_ids"] )
lowercase__ , lowercase__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowercase__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowercase__ = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
lowercase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase__ = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
lowercase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
lowercase__ = model.decode(
decoder_input_ids[:, -1:] , _lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowercase , decoder_position_ids=_lowercase , )
lowercase__ = model.decode(_lowercase , _lowercase , decoder_attention_mask=_lowercase )
lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
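# Both cache checks above assert the same invariant: decoding incrementally with
# `past_key_values` must reproduce a single full forward pass. A self-contained
# NumPy toy of that invariant for one attention head (an illustration of the
# property being tested, not the model's implementation):
import numpy as np

def _softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(0)
q, k, v = (rng.normal(size=(5, 8)) for _ in range(3))

# full pass: a causal mask hides future positions
causal = np.tril(np.ones((5, 5), dtype=bool))
full = _softmax(np.where(causal, q @ k.T, -np.inf)) @ v

# incremental pass: one query at a time against a growing key/value cache
steps = [_softmax(q[t] @ k[: t + 1].T) @ v[: t + 1] for t in range(5)]
assert np.allclose(full, np.stack(steps), atol=1e-6)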
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
__lowerCamelCase = 99
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowercase__ = input_ids.shape[0]
lowercase__ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ = self._get_config_and_data()
lowercase__ = FlaxBlenderbotSmallForConditionalGeneration(_lowercase )
lowercase__ = lm_model(input_ids=_lowercase )
lowercase__ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowercase__ = FlaxBlenderbotSmallForConditionalGeneration(_lowercase )
lowercase__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
lowercase__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
lowercase__ = lm_model(input_ids=_lowercase , decoder_input_ids=_lowercase )
lowercase__ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
lowercase__ = shift_tokens_right(_lowercase , 1 , 2 )
lowercase__ = np.equal(_lowercase , 1 ).astype(np.float32 ).sum()
lowercase__ = np.equal(_lowercase , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowercase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
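# The shift test above pins down the contract of `shift_tokens_right`. A minimal
# NumPy sketch consistent with those assertions (the -100 label handling is an
# assumption; only the shift, start-token, and shape behaviour are exercised here):
import numpy as np

def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]      # every token moves one slot to the right
    shifted[:, 0] = decoder_start_token_id  # the sequence now opens with the start token
    return np.where(shifted == -100, pad_token_id, shifted)  # masked labels become padding

demo = np.array([[71, 82, 18, 33, 2, 1, 1]])
assert (shift_tokens_right_sketch(demo, 1, 2)[0] == [2, 71, 82, 18, 33, 2, 1]).all()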
@require_flax
class lowerCAmelCase ( lowercase_ , unittest.TestCase , lowercase_ ):
__lowerCamelCase = True
__lowerCamelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__lowerCamelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = FlaxBlenderbotSmallModelTester(self )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = self._prepare_for_class(_lowercase , _lowercase )
lowercase__ = model_class(_lowercase )
@jax.jit
def encode_jitted(_lowercase :Union[str, Any] , _lowercase :Dict=None , **_lowercase :int ):
return model.encode(input_ids=_lowercase , attention_mask=_lowercase )
with self.subTest("JIT Enabled" ):
lowercase__ = encode_jitted(**_lowercase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase__ = encode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ = model_class(_lowercase )
lowercase__ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
lowercase__ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(_lowercase :List[str] , _lowercase :int , _lowercase :Union[str, Any] ):
return model.decode(
decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , )
with self.subTest("JIT Enabled" ):
lowercase__ = decode_jitted(**_lowercase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase__ = decode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase__ = np.ones((1, 1) ) * model.config.eos_token_id
lowercase__ = model(_lowercase )
self.assertIsNotNone(_lowercase )
| 655 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPT2Config,
T5Config,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
TFT5ForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPT2LMHeadModel,
RobertaForMaskedLM,
T5ForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSeq2SeqLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForSeq2SeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSeq2SeqLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForSeq2SeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
| 655 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'van'
def __init__( self :Optional[Any] , _lowercase :Dict=2_24 , _lowercase :Union[str, Any]=3 , _lowercase :List[Any]=[7, 3, 3, 3] , _lowercase :Any=[4, 2, 2, 2] , _lowercase :Union[str, Any]=[64, 1_28, 3_20, 5_12] , _lowercase :List[Any]=[3, 3, 12, 3] , _lowercase :Dict=[8, 8, 4, 4] , _lowercase :int="gelu" , _lowercase :List[Any]=0.02 , _lowercase :List[Any]=1e-6 , _lowercase :Any=1e-2 , _lowercase :int=0.0 , _lowercase :int=0.0 , **_lowercase :Dict , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = mlp_ratios
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = dropout_rate
| 655 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
def _A ( __magic_name__ ):
lowercase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase__ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
lowercase__ = value
else:
lowercase__ = value
return new_state_dict
def _A ( __magic_name__ ):
lowercase__ = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowercase__ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowercase__ = in_proj_weight_cross_attn[:256, :]
lowercase__ = in_proj_bias_cross_attn[:256]
lowercase__ = in_proj_weight_cross_attn[256:512, :]
lowercase__ = in_proj_bias_cross_attn[256:512]
lowercase__ = in_proj_weight_cross_attn[-256:, :]
lowercase__ = in_proj_bias_cross_attn[-256:]
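# The slicing above relies on the fused attention projection layout: with a
# DETR-style hidden size of 256, `in_proj_weight` stacks the query, key, and
# value projections as one (3 * 256, 256) matrix that is split back apart
# row-wise. A quick runnable check of that layout (sizes illustrative):
import torch

hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)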
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ , lowercase__ = image.size
lowercase__ = max(__magic_name__ , __magic_name__ )
lowercase__ = 800 if "detection" in checkpoint_url else 1000
lowercase__ = target_max_size / current_max_size
lowercase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _A ( __magic_name__ ):
lowercase__ = F.to_tensor(__magic_name__ )
lowercase__ = F.normalize(__magic_name__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
logger.info("Converting model..." )
# load original state dict
lowercase__ = torch.hub.load_state_dict_from_url(__magic_name__ , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__ = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
# create HuggingFace model and load state dict
lowercase__ = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowercase__ = 15
lowercase__ = 2
lowercase__ = {0: "table", 1: "table rotated"}
lowercase__ = id2label
lowercase__ = {v: k for k, v in id2label.items()}
else:
lowercase__ = 125
lowercase__ = 6
lowercase__ = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
lowercase__ = id2label
lowercase__ = {v: k for k, v in id2label.items()}
lowercase__ = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
lowercase__ = TableTransformerForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# verify our conversion
lowercase__ = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
lowercase__ = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=__magic_name__ )
lowercase__ = Image.open(__magic_name__ ).convert("RGB" )
lowercase__ = normalize(resize(__magic_name__ , __magic_name__ ) ).unsqueeze(0 )
lowercase__ = model(__magic_name__ )
if "detection" in checkpoint_url:
lowercase__ = (1, 15, 3)
lowercase__ = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
lowercase__ = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
lowercase__ = (1, 125, 7)
lowercase__ = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
lowercase__ = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
lowercase__ = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(__magic_name__ )
image_processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
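# Typical invocation, shown as comments (the script filename and output path are
# illustrative, not taken from this file):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection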
| 655 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = ['image_processor', 'tokenizer']
__lowerCamelCase = 'CLIPImageProcessor'
__lowerCamelCase = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self :int , _lowercase :Optional[Any]=None , _lowercase :Dict=None , **_lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _lowercase , )
lowercase__ = kwargs.pop("feature_extractor" )
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_lowercase , _lowercase )
def __call__( self :Tuple , _lowercase :Optional[int]=None , _lowercase :int=None , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if images is not None:
lowercase__ = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and images is not None:
lowercase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def UpperCAmelCase ( self :Any , *_lowercase :Dict , **_lowercase :Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[Any] , *_lowercase :Union[str, Any] , **_lowercase :Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _lowercase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _lowercase , )
return self.image_processor
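# Minimal usage sketch of the processor above, shown as comments because it
# downloads weights (the checkpoint id is the standard public CLIP one; the
# public class name, which this dump obfuscates, is CLIPProcessor):
#
#   from transformers import CLIPProcessor
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']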
| 655 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
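# The `_LazyModule` stand-in registered here defers the submodule import until an
# attribute such as `ByT5Tokenizer` is first accessed, keeping `import transformers`
# cheap when this tokenizer is never used.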
| 655 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_snake_case = logging.get_logger(__name__)
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ):
# Recurse if needed
if "." in tensor_name:
lowercase__ = tensor_name.split("." )
for split in splits[:-1]:
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
lowercase__ = new_module
lowercase__ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
lowercase__ = tensor_name in module._buffers
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
lowercase__ = False
lowercase__ = False
if is_buffer or not is_bitsandbytes_available():
lowercase__ = False
lowercase__ = False
else:
lowercase__ = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
lowercase__ = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
if is_4bit or is_8bit:
lowercase__ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase__ = old_value.to(__magic_name__ )
elif isinstance(__magic_name__ , torch.Tensor ):
lowercase__ = value.to("cpu" )
if value.dtype == torch.int8:
lowercase__ = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_8bit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
lowercase__ = torch.tensor(__magic_name__ , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
lowercase__ = new_value.T
lowercase__ = old_value.__dict__
if is_8bit:
lowercase__ = bnb.nn.Int8Params(__magic_name__ , requires_grad=__magic_name__ , **__magic_name__ ).to(__magic_name__ )
elif is_4bit:
lowercase__ = bnb.nn.Params4bit(__magic_name__ , requires_grad=__magic_name__ , **__magic_name__ ).to(__magic_name__ )
lowercase__ = new_value
if fp16_statistics is not None:
setattr(module.weight , "SCB" , fp16_statistics.to(__magic_name__ ) )
else:
if value is None:
lowercase__ = old_value.to(__magic_name__ )
elif isinstance(__magic_name__ , torch.Tensor ):
lowercase__ = value.to(__magic_name__ )
else:
lowercase__ = torch.tensor(__magic_name__ , device=__magic_name__ )
if is_buffer:
lowercase__ = new_value
else:
lowercase__ = nn.Parameter(__magic_name__ , requires_grad=old_value.requires_grad )
lowercase__ = new_value
def _A ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=False ):
for name, module in model.named_children():
if current_key_name is None:
lowercase__ = []
current_key_name.append(__magic_name__ )
if (isinstance(__magic_name__ , nn.Linear ) or isinstance(__magic_name__ , __magic_name__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(__magic_name__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__magic_name__ , __magic_name__ ):
lowercase__ , lowercase__ = module.weight.shape
else:
lowercase__ = module.in_features
lowercase__ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowercase__ = bnb.nn.Linear8bitLt(
__magic_name__ , __magic_name__ , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
lowercase__ = True
else:
if (
quantization_config.llm_int8_skip_modules is not None
and name in quantization_config.llm_int8_skip_modules
):
pass
else:
lowercase__ = bnb.nn.Linear4bit(
__magic_name__ , __magic_name__ , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
lowercase__ = True
# Store the module class in case we need to transpose the weight later
lowercase__ = type(__magic_name__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__magic_name__ )
if len(list(module.children() ) ) > 0:
lowercase__ , lowercase__ = _replace_with_bnb_linear(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_been_replaced=__magic_name__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _A ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None ):
lowercase__ = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
lowercase__ , lowercase__ = _replace_with_bnb_linear(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def _A ( *__magic_name__ , **__magic_name__ ):
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , __magic_name__ , )
return replace_with_bnb_linear(*__magic_name__ , **__magic_name__ )
def _A ( *__magic_name__ , **__magic_name__ ):
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , __magic_name__ , )
return set_module_quantized_tensor_to_device(*__magic_name__ , **__magic_name__ )
def _A ( __magic_name__ ):
lowercase__ = deepcopy(__magic_name__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowercase__ = find_tied_parameters(__magic_name__ )
# For compatibility with Accelerate < 0.18
if isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase__ = sum(__magic_name__ , [] )
lowercase__ = len(__magic_name__ ) > 0
# Check if it is a base model
lowercase__ = not hasattr(__magic_name__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase__ = list(model.named_children() )
lowercase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase__ = set(__magic_name__ ) - set(__magic_name__ )
lowercase__ = list(set(__magic_name__ ) ) + list(__magic_name__ )
# remove ".weight" from the keys
lowercase__ = [".weight", ".bias"]
lowercase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase__ = name.replace(__magic_name__ , "" )
filtered_module_names.append(__magic_name__ )
return filtered_module_names
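# Sketch of how these helpers are meant to compose during quantized loading,
# shown as comments (it needs bitsandbytes plus CUDA; the function names follow
# the upstream API that this dump obfuscates to `_A`, and the config values are
# illustrative):
#
#   from transformers import BitsAndBytesConfig
#   quant_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   keep_in_fp32 = get_keys_to_not_convert(model)  # e.g. ["lm_head"] plus tied weights
#   model = replace_with_bnb_linear(model, modules_to_not_convert=keep_in_fp32,
#                                   quantization_config=quant_config)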
| 655 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
"""
def _A ( __magic_name__ , __magic_name__ , __magic_name__=8 ):
lowercase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
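# Quick sanity check of the helper above with the default movq scale factor of 8:
# a 768x768 request maps to a 96x96 latent grid (768 // 8**2 = 12, times 8), and
# sizes that are not divisible round the latent grid up. A standalone restatement:
def _downscale(h, w, scale_factor=8):
    nh = h // scale_factor**2 + (h % scale_factor**2 != 0)
    nw = w // scale_factor**2 + (w % scale_factor**2 != 0)
    return nh * scale_factor, nw * scale_factor

assert _downscale(768, 768) == (96, 96)
assert _downscale(770, 768) == (104, 96)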
class lowerCAmelCase ( lowercase_ ):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Tuple , _lowercase :List[str] , _lowercase :Tuple , _lowercase :Optional[Any] , _lowercase :int , _lowercase :str ):
'''simple docstring'''
if latents is None:
lowercase__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase__ = latents.to(_lowercase )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self :int , _lowercase :int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self :int , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :int = 5_12 , _lowercase :int = 5_12 , _lowercase :int = 1_00 , _lowercase :float = 4.0 , _lowercase :int = 1 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :Optional[torch.FloatTensor] = None , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.unet.config.in_channels
lowercase__ , lowercase__ = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {"image_embeds": image_embeds}
lowercase__ = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase__ = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 1 |
def _A ( __magic_name__ ):
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(a , float ):
raise TypeError("Input value must be a 'int' type" )
return bin(__magic_name__ ).count("1" )
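# A couple of concrete values for the popcount above:
# bin(25) == "0b11001" has 3 set bits, bin(37) == "0b100101" also has 3.
assert _A(25) == 3 and _A(37) == 3 and _A(0) == 0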
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 |
import inspect
import unittest
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
lowercase__ = inspect.getmembers(_lowercase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowercase__ = "k-diffusion"
elif backend == "invisible_watermark":
lowercase__ = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 1 |
import sys
_snake_case = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def _A ( __magic_name__ ):
lowercase__ = 1
for digit in s:
product *= int(__magic_name__ )
return product
def _A ( __magic_name__ = N ):
lowercase__ = -sys.maxsize - 1
lowercase__ = n[:13]
lowercase__ = 13
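# Slide a 13-digit window over the number: when the incoming digit is at least
# as large as the digit about to fall out, just advance one position; otherwise
# score the current window's product and jump the window 13 digits ahead.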
while cur_index < len(__magic_name__ ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
lowercase__ = substr[1:] + n[cur_index]
cur_index += 1
else:
lowercase__ = max(__magic_name__ , str_eval(__magic_name__ ) )
lowercase__ = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 655 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
# setable values
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
class lowerCAmelCase ( lowercase_ , lowercase_ ):
__lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__lowerCamelCase = 42
@property
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self :str , _lowercase :int = 10_00 , _lowercase :float = 0.0001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.floataa , ):
'''simple docstring'''
lowercase__ = dtype
def UpperCAmelCase ( self :str , _lowercase :Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
lowercase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ = jnp.array(1.0 , dtype=self.dtype )
lowercase__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :Optional[int] = None ):
'''simple docstring'''
return sample
def UpperCAmelCase ( self :List[str] , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :Tuple = () ):
'''simple docstring'''
lowercase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ = (jnp.arange(0 , _lowercase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Tuple , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :List[str]=None , _lowercase :Tuple=None ):
'''simple docstring'''
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ = jnp.clip(_lowercase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ = jnp.log(jnp.clip(_lowercase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowercase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ = variance
lowercase__ = state.common.betas[t]
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self :Optional[int] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :int , _lowercase :jnp.ndarray , _lowercase :Optional[jax.random.KeyArray] = None , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(_lowercase , sample.shape[1] , axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(_lowercase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(_lowercase , num=1 )
lowercase__ = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase ) ** 0.5) * noise
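# Branching with jnp.where instead of a Python conditional keeps the step traceable
# under jax.jit, since the timestep t is an abstract tracer at trace time.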
lowercase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase )
def UpperCAmelCase ( self :int , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :Dict , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase )
def __len__( self :List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 655 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
__lowerCamelCase = Features({'audio': Audio()} )
__lowerCamelCase = Features({'transcription': Value('string' )} )
__lowerCamelCase = "audio"
__lowerCamelCase = "transcription"
def UpperCAmelCase ( self :List[str] , _lowercase :Dict ):
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , _lowercase ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
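# Return a copy of the template whose input schema carries the dataset's own Audio
# feature (e.g. its sampling rate) instead of the default one.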
lowercase__ = copy.deepcopy(self )
lowercase__ = self.input_schema.copy()
lowercase__ = features[self.audio_column]
lowercase__ = input_schema
return task_template
@property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 655 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
_snake_case = logging.get_logger(__name__)
_snake_case = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowercase_ )} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__lowerCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
__lowerCamelCase = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
__lowerCamelCase = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__lowerCamelCase = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__lowerCamelCase = field(
default=20 , metadata={'help': 'The total number of n-best predictions to generate.'} )
__lowerCamelCase = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
__lowerCamelCase = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'train'
__lowerCamelCase = 'dev'
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self :Optional[Any] , _lowercase :SquadDataTrainingArguments , _lowercase :PreTrainedTokenizer , _lowercase :Optional[int] = None , _lowercase :Union[str, Split] = Split.train , _lowercase :Optional[bool] = False , _lowercase :Optional[str] = None , _lowercase :Optional[str] = "pt" , ):
'''simple docstring'''
lowercase__ = args
lowercase__ = is_language_sensitive
lowercase__ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_lowercase , _lowercase ):
try:
lowercase__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowercase__ = mode
# Load data features from cache or dataset file
lowercase__ = "v2" if args.version_2_with_negative else "v1"
lowercase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + ".lock"
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
lowercase__ = time.time()
lowercase__ = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowercase__ = self.old_features["features"]
lowercase__ = self.old_features.get("dataset" , _lowercase )
lowercase__ = self.old_features.get("examples" , _lowercase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
" future run" )
else:
if mode == Split.dev:
lowercase__ = self.processor.get_dev_examples(args.data_dir )
else:
lowercase__ = self.processor.get_train_examples(args.data_dir )
lowercase__ , lowercase__ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
lowercase__ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self :Dict ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :Any , _lowercase :Any ):
'''simple docstring'''
lowercase__ = self.features[i]
lowercase__ = torch.tensor(feature.input_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowercase__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.cls_index , dtype=torch.long )
lowercase__ = torch.tensor(feature.p_mask , dtype=torch.float )
lowercase__ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowercase__ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowercase__ = torch.tensor(feature.start_position , dtype=torch.long )
lowercase__ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 655 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
_snake_case = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
_snake_case = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _A ( ):
lowercase__ = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
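# Every byte outside the printable ranges above is remapped to code point 256 + n,
# so the resulting strings contain no raw whitespace or control characters for BPE to trip on.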
lowercase__ = bs[:]
lowercase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__magic_name__ )
cs.append(2**8 + n )
n += 1
lowercase__ = [chr(__magic_name__ ) for n in cs]
return dict(zip(__magic_name__ , __magic_name__ ) )
def _A ( __magic_name__ ):
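# Collect the set of adjacent symbol pairs in a word (a tuple of symbols), e.g.
# get_pairs(("h", "e", "l", "l", "o")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}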
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
return pairs
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self :str , _lowercase :List[str] , _lowercase :Any , _lowercase :Any="replace" , _lowercase :str="<s>" , _lowercase :List[str]="</s>" , _lowercase :List[str]="</s>" , _lowercase :Tuple="<s>" , _lowercase :Any="<unk>" , _lowercase :str="<pad>" , _lowercase :List[str]="<mask>" , _lowercase :Optional[Any]=False , **_lowercase :str , ):
'''simple docstring'''
lowercase__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
lowercase__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
lowercase__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
lowercase__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
lowercase__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
lowercase__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="utf-8" ) as vocab_handle:
lowercase__ = json.load(_lowercase )
lowercase__ = {v: k for k, v in self.encoder.items()}
lowercase__ = errors # how to handle errors in decoding
lowercase__ = bytes_to_unicode()
lowercase__ = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="utf-8" ) as merges_handle:
lowercase__ = merges_handle.read().split("\n" )[1:-1]
lowercase__ = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
lowercase__ = {}
lowercase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self :Dict , _lowercase :List[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase__ = tuple(_lowercase )
lowercase__ = get_pairs(_lowercase )
if not pairs:
return token
while True:
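# Merge the lowest-ranked (i.e. earliest-learned) pair first; pairs absent from
# bpe_ranks compare as +inf and are never merged.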
lowercase__ = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(_lowercase ):
try:
lowercase__ = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ = tuple(_lowercase )
lowercase__ = new_word
if len(_lowercase ) == 1:
break
else:
lowercase__ = get_pairs(_lowercase )
lowercase__ = " ".join(_lowercase )
lowercase__ = word
return word
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = []
for token in re.findall(self.pat , _lowercase ):
lowercase__ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(" " ) )
return bpe_tokens
def UpperCAmelCase ( self :int , _lowercase :Tuple ):
'''simple docstring'''
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self :str , _lowercase :Tuple ):
'''simple docstring'''
return self.decoder.get(_lowercase )
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = "".join(_lowercase )
lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def UpperCAmelCase ( self :str , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + "\n" )
lowercase__ = 0
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : _lowercase[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowercase__ = token_index
writer.write(" ".join(_lowercase ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :List[Any] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :int=False , **_lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
lowercase__ = " " + text
return (text, kwargs)
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self :Dict , _lowercase :"Conversation" ):
'''simple docstring'''
lowercase__ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(_lowercase )
lowercase__ = " ".join(_lowercase )
lowercase__ = self.encode(_lowercase )
if len(_lowercase ) > self.model_max_length:
lowercase__ = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 655 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = """▁"""
_snake_case = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
_snake_case = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
_snake_case = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
_snake_case = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
_snake_case = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = ["input_ids"]
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = RESOURCE_FILES_NAMES
def __init__( self :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=False , _lowercase :Dict="utf8" , _lowercase :Optional[Any]="[UNK]" , _lowercase :Optional[int]="[SEP]" , _lowercase :List[str]="[PAD]" , _lowercase :Dict="[CLS]" , _lowercase :Optional[Any]="[MASK]" , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Tuple , ):
'''simple docstring'''
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , vocab_file=_lowercase , encoding=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
lowercase__ = do_lower_case
lowercase__ = sentencepiece_model_ckpt
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase__ = self.load_vocab(filepath=_lowercase )
else:
lowercase__ = {self.sp_model.id_to_piece(_lowercase ): id for id in range(self.sp_model.get_piece_size() )}
lowercase__ = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase ( self :Any , _lowercase :Dict ):
'''simple docstring'''
if text is None:
return None
lowercase__ = self.tokenize(_lowercase )
lowercase__ , lowercase__ = "", []
for i, ch in enumerate(_lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase__ = self.SP_CHAR_MAPPING.get(_lowercase )
else:
lowercase__ = unicodedata.normalize("NFKC" , _lowercase )
if self.is_whitespace(_lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowercase ) )
lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0
if self.do_lower_case:
lowercase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase__ = token[1:]
lowercase__ = text[offset:].index(_lowercase ) + offset
lowercase__ = start + len(_lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase__ = end
return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase ( self :Dict , _lowercase :int , _lowercase :Union[str, Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowercase ) + 1) + [1] * (len(_lowercase ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
def UpperCAmelCase ( self :List[str] , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowercase : _lowercase[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
| 655 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase_ )
class lowerCAmelCase ( lowercase_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowerCamelCase = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__lowerCamelCase = Features({'text': Value('string' )} )
__lowerCamelCase = Features({'labels': ClassLabel} )
__lowerCamelCase = "text"
__lowerCamelCase = "labels"
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , _lowercase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase__ = copy.deepcopy(self )
lowercase__ = self.label_schema.copy()
lowercase__ = features[self.label_column]
lowercase__ = label_schema
return task_template
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 655 |
def _A ( __magic_name__ ):
lowercase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _A ( __magic_name__ = 100 ):
lowercase__ = 1
lowercase__ = 2
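# Convergent numerators of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] follow the recurrence
# h_n = a_n * h_{n-1} + h_{n-2}; every third partial quotient is even (2, 4, 6, ...),
# which is what `2 * i // 3 if i % 3 == 0 else 1` produces below.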
for i in range(2 , max_n + 1 ):
lowercase__ = pre_numerator
lowercase__ = 2 * i // 3 if i % 3 == 0 else 1
lowercase__ = cur_numerator
lowercase__ = e_cont * pre_numerator + temp
return sum_digits(__magic_name__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 655 | 1 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase ( lowercase_ ):
pass
class lowerCAmelCase :
def __init__( self :Any , _lowercase :Any ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
def __iter__( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = self
lowercase__ = []
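# Remember every node visited so far; revisiting one proves the list contains a cycle.
# List membership makes this check O(n^2) in the number of nodes.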
while node:
if node in visited:
raise ContainsLoopError
visited.append(_lowercase )
yield node.data
lowercase__ = node.next_node
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
_snake_case = Node(1)
_snake_case = Node(2)
_snake_case = Node(3)
_snake_case = Node(4)
print(root_node.has_loop) # False
_snake_case = root_node.next_node
print(root_node.has_loop) # True
_snake_case = Node(5)
_snake_case = Node(6)
_snake_case = Node(5)
_snake_case = Node(6)
print(root_node.has_loop) # False
_snake_case = Node(1)
print(root_node.has_loop) # False
| 655 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'AutoTokenizer'
__lowerCamelCase = ['tokenizer']
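# Expected number of dimensions for each prompt array in a voice preset:
# semantic prompts are 1-D, coarse and fine prompts are 2-D.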
__lowerCamelCase = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self :Dict , _lowercase :List[str] , _lowercase :List[Any]=None ):
'''simple docstring'''
super().__init__(_lowercase )
lowercase__ = speaker_embeddings
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowercase__ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(_lowercase , _lowercase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = self.speaker_embeddings[voice_preset]
lowercase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowercase__ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
lowercase__ = np.load(_lowercase )
return voice_preset_dict
def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a numpy ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
| 655 | 1 |
import math
import random
def _A ( __magic_name__ , __magic_name__ = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
_snake_case = 0.02
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__magic_name__ ):
# Forward propagation
lowercase__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowercase__ = (expected / 100) - layer_a
# Error delta
lowercase__ = layer_1_error * sigmoid_function(__magic_name__ , __magic_name__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("""Expected value: """))
_snake_case = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 655 |
import math
import random
def _A ( __magic_name__ , __magic_name__ = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
_snake_case = 0.02
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__magic_name__ ):
# Forward propagation
lowercase__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowercase__ = (expected / 100) - layer_a
# Error delta
lowercase__ = layer_1_error * sigmoid_function(__magic_name__ , __magic_name__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("""Expected value: """))
_snake_case = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 655 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _A ( __magic_name__ , __magic_name__ , __magic_name__=1024 , __magic_name__=1024 , __magic_name__=False , **__magic_name__ ):
lowercase__ = AutoTokenizer.from_pretrained(__magic_name__ )
lowercase__ = SeqaSeqDataset(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , type_path="train" , **__magic_name__ )
lowercase__ = tok.pad_token_id
def get_lens(__magic_name__ ):
lowercase__ = tqdm(
DataLoader(__magic_name__ , batch_size=512 , num_workers=8 , shuffle=__magic_name__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowercase__ = []
for batch in dl:
lowercase__ = batch["input_ids"].ne(__magic_name__ ).sum(1 ).tolist()
lowercase__ = batch["labels"].ne(__magic_name__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__magic_name__ , __magic_name__ ):
max_lens.append(max(__magic_name__ , __magic_name__ ) )
else:
max_lens.extend(__magic_name__ )
return max_lens
lowercase__ = get_lens(__magic_name__ )
lowercase__ = SeqaSeqDataset(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , type_path="val" , **__magic_name__ )
lowercase__ = get_lens(__magic_name__ )
pickle_save(__magic_name__ , train_ds.len_file )
pickle_save(__magic_name__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 655 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'van'
def __init__( self :Optional[Any] , _lowercase :Dict=2_24 , _lowercase :Union[str, Any]=3 , _lowercase :List[Any]=[7, 3, 3, 3] , _lowercase :Any=[4, 2, 2, 2] , _lowercase :Union[str, Any]=[64, 1_28, 3_20, 5_12] , _lowercase :List[Any]=[3, 3, 12, 3] , _lowercase :Dict=[8, 8, 4, 4] , _lowercase :int="gelu" , _lowercase :List[Any]=0.02 , _lowercase :List[Any]=1e-6 , _lowercase :Any=1e-2 , _lowercase :int=0.0 , _lowercase :int=0.0 , **_lowercase :Dict , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = mlp_ratios
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = dropout_rate
| 655 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
_snake_case = logging.getLogger(__name__)
torch.set_grad_enabled(False)
_snake_case = """cuda""" if torch.cuda.is_available() else """cpu"""
def _A ( __magic_name__ , __magic_name__=100 , __magic_name__=" " ):
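# Split the text into words on the separator, then re-join every window of n words
# into a single passage string.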
lowercase__ = text.split(__magic_name__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )]
def _A ( __magic_name__ ):
lowercase__ , lowercase__ = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(__magic_name__ ):
titles.append(title if title is not None else "" )
texts.append(__magic_name__ )
return {"title": titles, "text": texts}
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=__magic_name__ , padding="longest" , return_tensors="pt" )["input_ids"]
lowercase__ = ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ = dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase__ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ )
lowercase__ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase__ = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
lowercase__ = dataset.map(
partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , batch_size=processing_args.batch_size , features=__magic_name__ , )
# And finally save your dataset
lowercase__ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__magic_name__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__magic_name__ )
# And save the index
lowercase__ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__magic_name__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=str(Path(lowercase_ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
__lowerCamelCase = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
__lowerCamelCase = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
__lowerCamelCase = field(
default=str(Path(lowercase_ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=lowercase_ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
__lowerCamelCase = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
__lowerCamelCase = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
_snake_case = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
_snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
_snake_case = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 655 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( enum.Enum ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowercase__ = None
if self.model.config.prefix is not None:
lowercase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowercase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
lowercase__ = {**self._preprocess_params, **preprocess_params}
lowercase__ = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
| 655 | 1 |
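A de-mangled sketch of the `hole` truncation logic in the pipeline snippet above: keep only the rightmost prompt tokens so that prompt plus requested new tokens fits the model window. The plain token list and `model_max_length` argument are illustrative stand-ins for the tokenizer/model attributes, not the actual pipeline API.

def truncate_for_hole(input_ids, new_tokens, model_max_length):
    """Keep only the rightmost tokens so prompt + generation fits the window."""
    if new_tokens < 0:
        raise ValueError("We cannot infer how many new tokens are expected")
    cur_len = len(input_ids)
    if cur_len + new_tokens > model_max_length:
        keep_length = model_max_length - new_tokens
        if keep_length <= 0:
            raise ValueError("Desired new tokens exceed the model's max length")
        input_ids = input_ids[-keep_length:]
    return input_ids

assert truncate_for_hole(list(range(10)), 4, 8) == [6, 7, 8, 9]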
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_snake_case = parse(importlib.metadata.version("""torch"""))
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
lowercase__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = parse(importlib.metadata.version(__magic_name__ ) )
return operation(__magic_name__ , parse(__magic_name__ ) )
def _A ( __magic_name__ , __magic_name__ ):
return compare_versions(__magic_name__ , __magic_name__ , __magic_name__ )
| 655 |
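The short snippet above is a digit-mangled version-comparison helper built on `packaging`. A readable sketch follows; the operator table is an assumption (the real one is imported from `.constants`), but `packaging.version.parse` and `importlib.metadata.version` are the actual APIs used.

import importlib.metadata
import operator
from packaging.version import parse

# Assumed mapping from operator strings to comparison functions.
STR_OPERATION_TO_FUNC = {
    ">": operator.gt, ">=": operator.ge, "==": operator.eq,
    "!=": operator.ne, "<=": operator.le, "<": operator.lt,
}

def compare_versions(library_or_version, operation, requirement_version):
    if operation not in STR_OPERATION_TO_FUNC:
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC)}, received {operation}")
    op = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        # A library name was passed: look up its installed version.
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return op(library_or_version, parse(requirement_version))

def is_torch_version(operation, version):
    return compare_versions(parse(importlib.metadata.version("torch")), operation, version)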
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _A ( __magic_name__ ):
lowercase__ = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__magic_name__ )[0]
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(rows * cols * num_images )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
lowercase__ = data.reshape(__magic_name__ , __magic_name__ , __magic_name__ , 1 )
return data
@deprecated(__magic_name__ , "Please use tf.one_hot on tensors." )
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = labels_dense.shape[0]
lowercase__ = numpy.arange(__magic_name__ ) * num_classes
lowercase__ = numpy.zeros((num_labels, num_classes) )
lowercase__ = 1
return labels_one_hot
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(__magic_name__ )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__magic_name__ , __magic_name__ )
return labels
class lowerCAmelCase :
@deprecated(
_lowercase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self :List[str] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple=False , _lowercase :str=False , _lowercase :Dict=dtypes.floataa , _lowercase :Optional[Any]=True , _lowercase :Any=None , ):
'''simple docstring'''
lowercase__ , lowercase__ = random_seed.get_seed(_lowercase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowercase__ = dtypes.as_dtype(_lowercase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
lowercase__ = 1_00_00
lowercase__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
lowercase__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase__ = images.astype(numpy.floataa )
lowercase__ = numpy.multiply(_lowercase , 1.0 / 255.0 )
lowercase__ = images
lowercase__ = labels
lowercase__ = 0
lowercase__ = 0
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._images
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return self._labels
@property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return self._num_examples
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._epochs_completed
def UpperCAmelCase ( self :str , _lowercase :Union[str, Any] , _lowercase :Any=False , _lowercase :Union[str, Any]=True ):
'''simple docstring'''
if fake_data:
lowercase__ = [1] * 7_84
lowercase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_lowercase )],
[fake_label for _ in range(_lowercase )],
)
lowercase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perma]
lowercase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ = self._num_examples - start
lowercase__ = self._images[start : self._num_examples]
lowercase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perm]
lowercase__ = self.labels[perm]
# Start next epoch
lowercase__ = 0
lowercase__ = batch_size - rest_num_examples
lowercase__ = self._index_in_epoch
lowercase__ = self._images[start:end]
lowercase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
lowercase__ = fake()
lowercase__ = fake()
lowercase__ = fake()
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
if not source_url: # empty string check
lowercase__ = DEFAULT_SOURCE_URL
lowercase__ = "train-images-idx3-ubyte.gz"
lowercase__ = "train-labels-idx1-ubyte.gz"
lowercase__ = "t10k-images-idx3-ubyte.gz"
lowercase__ = "t10k-labels-idx1-ubyte.gz"
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
if not 0 <= validation_size <= len(__magic_name__ ):
lowercase__ = (
"Validation size should be between 0 and "
f'''{len(__magic_name__ )}. Received: {validation_size}.'''
)
raise ValueError(__magic_name__ )
lowercase__ = train_images[:validation_size]
lowercase__ = train_labels[:validation_size]
lowercase__ = train_images[validation_size:]
lowercase__ = train_labels[validation_size:]
lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
| 655 | 1 |
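Two helpers from the MNIST loader above, de-mangled as a sketch. `_readaa`/`numpy.uintaa` presumably stand for `_read32`/`numpy.uint32` (IDX headers are 32-bit big-endian), and the one-hot converter appears to have lost its flat-index assignment in the mangling; both are reconstructed here on that assumption.

import io
import numpy as np

def read_uint32_be(stream):
    # IDX files store 32-bit big-endian integers (magic number, counts, dims).
    dt = np.dtype(np.uint32).newbyteorder(">")
    return int(np.frombuffer(stream.read(4), dtype=dt)[0])

def dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    # Set one entry per row via flat indexing into the 2-D array.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

print(read_uint32_be(io.BytesIO(b"\x00\x00\x08\x03")))  # 2051, the IDX image magic
print(dense_to_one_hot(np.array([0, 2, 1]), 3))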
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''CLIPImageProcessor'''
a__ = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
__magic_name__ :Union[str, Any] = kwargs.pop('''feature_extractor''' )
__magic_name__ :List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__magic_name__ :int = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__magic_name__ :Optional[Any] = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__magic_name__ :Dict = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.tokenizer.model_input_names
__magic_name__ :Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 0 |
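A minimal sketch of the merge logic in the processor above: tokenize text, preprocess images, and attach `pixel_values` to the text encoding. The two callables here are placeholders, not real Hugging Face components.

def process(text=None, images=None, tokenize=None, image_process=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = tokenize(text) if text is not None else None
    image_features = image_process(images) if images is not None else None
    if encoding is not None and image_features is not None:
        # Merge: text encoding carries the image tensors along.
        encoding["pixel_values"] = image_features["pixel_values"]
        return encoding
    return encoding if encoding is not None else dict(image_features)

out = process(text="hi", images=[0],
              tokenize=lambda t: {"input_ids": [1, 2]},
              image_process=lambda i: {"pixel_values": [[0.0]]})
print(out)  # {'input_ids': [1, 2], 'pixel_values': [[0.0]]}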
from __future__ import annotations
class lowerCAmelCase :
def __init__( self :Union[str, Any] , _lowercase :List[Any]=None ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
def __repr__( self :Dict ):
'''simple docstring'''
lowercase__ = []
lowercase__ = self
while temp:
string_rep.append(f'''{temp.data}''' )
lowercase__ = temp.next
return "->".join(_lowercase )
def _A ( __magic_name__ ):
if not elements_list:
raise Exception("The Elements List is empty" )
lowercase__ = lowercase__ = Node(elements_list[0] )
for i in range(1 , len(__magic_name__ ) ):
lowercase__ = Node(elements_list[i] )
lowercase__ = current.next
return head
def _A ( __magic_name__ ):
if head_node is not None and isinstance(__magic_name__ , __magic_name__ ):
print_reverse(head_node.next )
print(head_node.data )
def _A ( ):
from doctest import testmod
testmod()
lowercase__ = make_linked_list([14, 52, 14, 12, 43] )
print("Linked List:" )
print(__magic_name__ )
print("Elements in Reverse:" )
print_reverse(__magic_name__ )
if __name__ == "__main__":
main()
| 655 | 0 |
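The recursive `print_reverse` above can exhaust Python's recursion limit on long lists. An iterative, stack-based sketch of the same traversal, with its own minimal `Node` so it runs standalone:

class Node:
    def __init__(self, data, next=None):
        self.data, self.next = data, next

def print_reverse_iterative(head):
    stack = []
    while head is not None:       # walk forward, remembering each value
        stack.append(head.data)
        head = head.next
    while stack:                  # pop to emit in reverse order
        print(stack.pop())

print_reverse_iterative(Node(14, Node(52, Node(12))))  # prints 12, 52, 14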
class __lowerCamelCase :
def __init__( self: Union[str, Any],A_: Tuple ):
'''simple docstring'''
__UpperCamelCase = val
__UpperCamelCase = None
__UpperCamelCase = None
def snake_case_ ( self: Any,A_: List[Any] ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
__UpperCamelCase = Node(A_ )
else:
self.left.insert(A_ )
elif val > self.val:
if self.right is None:
__UpperCamelCase = Node(A_ )
else:
self.right.insert(A_ )
else:
__UpperCamelCase = val
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
if root:
inorder(root.left , _lowercase )
res.append(root.val )
inorder(root.right , _lowercase )
def _A ( _lowercase ) -> Optional[int]:
"""simple docstring"""
if len(_lowercase ) == 0:
return arr
__UpperCamelCase = Node(arr[0] )
for i in range(1 , len(_lowercase ) ):
root.insert(arr[i] )
# Traverse BST in order.
__UpperCamelCase = []
inorder(_lowercase , _lowercase )
return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
| 1 |
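A de-mangled sketch of the tree sort above: insert every element into a binary search tree, then read the values back with an in-order traversal. As in the original, a repeated key is kept only once.

class Node:
    def __init__(self, val):
        self.val, self.left, self.right = val, None, None

    def insert(self, val):
        if val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)

def tree_sort(arr):
    if not arr:
        return arr
    root = Node(arr[0])
    for x in arr[1:]:
        root.insert(x)
    res = []
    def inorder(node):
        if node:
            inorder(node.left)
            res.append(node.val)
            inorder(node.right)
    inorder(root)
    return res

print(tree_sort([10, 1, 3, 2, 9, 14, 13]))  # [1, 2, 3, 9, 10, 13, 14]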
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __magic_name__ , __magic_name__=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
d //= 2  # integer division keeps d integral for the modular exponentiation below
exp += 1
# n - 1=d*(2**exp)
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 , n - 1 )
lowercase__ = bin_exp_mod(__magic_name__ , __magic_name__ , __magic_name__ )
if b != 1:
lowercase__ = True
for _ in range(__magic_name__ ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
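A cleaner sketch of the Miller-Rabin test above, using Python's built-in three-argument `pow` for modular exponentiation instead of a custom `bin_exp_mod`, and integer division when factoring n - 1.

import random

def is_prime_miller_rabin(n, rounds=40):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:      # factor n - 1 as d * 2**exp with d odd
        d //= 2
        exp += 1
    for _ in range(rounds):
        a = random.randint(2, n - 1)
        b = pow(a, d, n)
        if b in (1, n - 1):
            continue
        for _ in range(exp - 1):
            b = b * b % n
            if b == n - 1:
                break
        else:
            return False   # a is a witness: n is composite
    return True

print([i for i in range(30) if is_prime_miller_rabin(i)])  # 2, 3, 5, 7, ...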
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase__ ( datasets.BeamBasedBuilder):
"""simple docstring"""
def snake_case_ ( self : List[Any] ) -> List[str]:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=__lowerCAmelCase , )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ) -> List[Any]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase )
class lowerCamelCase__ ( datasets.BeamBasedBuilder):
"""simple docstring"""
def snake_case_ ( self : Tuple ) -> int:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=__lowerCAmelCase , )
def snake_case_ ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> Dict:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> str:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase__ ( _A):
"""simple docstring"""
@require_beam
def snake_case_ ( self : Union[str, Any] ) -> List[str]:
_A = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_A = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCAmelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def snake_case_ ( self : int ) -> str:
import apache_beam as beam
_A = beam.io.parquetio.WriteToParquet
_A = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
_A = partial(__lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
_A = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = DummyBeamDataset(cache_dir=__lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def snake_case_ ( self : Any ) -> int:
_A = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_A = NestedBeamDataset(cache_dir=__lowerCAmelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
_A = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowerCAmelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 2 |
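A minimal runnable sketch of the Beam pattern those builders use, assuming `apache-beam` is installed: `beam.Create` turns an in-memory list of (key, example) pairs into a PCollection, executed here on the default DirectRunner.

import apache_beam as beam

examples = [(i, {"content": c}) for i, c in enumerate(["foo", "bar", "foobar"])]
with beam.Pipeline() as pipeline:
    _ = pipeline | "Load Examples" >> beam.Create(examples) | beam.Map(print)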
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
| 655 | 0 |
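The save/reload tests above reduce to one numeric check: outputs before and after a round-trip must agree within an absolute tolerance. A generic sketch of that comparison:

import numpy as np

def max_abs_difference(a, b):
    # Largest element-wise deviation between two array-likes.
    return float(np.abs(np.asarray(a) - np.asarray(b)).max())

assert max_abs_difference([0.1, 0.2], [0.1, 0.20005]) < 1e-4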
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def A_( A : Optional[Any]=32 , A : List[Any]=10 , A : Union[str, Any]=100 , A : List[str]=1026 , A : List[Any]=True , A : Dict="data/tokenized_stories_train_wikitext103.jbl" , A : Dict="igf_context_pairs.jbl" , ):
set_seed(3)
# generate train_data and objective_set
UpperCamelCase , UpperCamelCase = generate_datasets(
A , A , number=A , min_len=1026 , trim=A)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCamelCase = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# load pretrained model
UpperCamelCase = load_gpta('gpt2').to(A)
print('computing perplexity on objective set')
UpperCamelCase = compute_perplexity(A , A , A).item()
print('perplexity on objective set:' , A)
# collect igf pairs and save to file demo.jbl
collect_objective_set(A , A , A , A , A , A , A , A)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def A_( A : Union[str, Any] , A : Any=15 , A : List[str]=128 , A : List[str]=100 , A : Tuple="igf_model.pt" , ):
set_seed(42)
# Load pre-trained model
UpperCamelCase = GPTaLMHeadModel.from_pretrained('gpt2')
# Initialize secondary learner to use embedding weights of model
UpperCamelCase = SecondaryLearner(A)
# Train secondary learner
UpperCamelCase = train_secondary_learner(
A , A , max_epochs=A , batch_size=A , eval_freq=100 , igf_model_path=A , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def A_( A : Tuple , A : List[Any] , A : int , A : Optional[int]=32 , A : Any=1000 , A : List[str]=16 , A : List[str]=1.0 , A : Union[str, Any]=recopy_gpta , A : Any=None , A : Dict=10 , A : Any="gpt2_finetuned.pt" , ):
UpperCamelCase = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
UpperCamelCase = RandomSampler(A)
UpperCamelCase = DataLoader(A , sampler=A)
UpperCamelCase = max_steps // (len(A)) + 1
UpperCamelCase = 0
UpperCamelCase = torch.zeros((1, context_len) , dtype=torch.long , device=A)
UpperCamelCase , UpperCamelCase , UpperCamelCase = recopy_model(A , A , A)
model.train()
if secondary_learner is not None:
secondary_learner.to(A)
secondary_learner.eval()
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = []
UpperCamelCase = []
# Compute the performance of the transformer model at the beginning
UpperCamelCase = compute_perplexity(A , A , A)
test_perps.append(A)
print('Test perplexity, step' , A , ':' , A)
for epoch in range(int(A)):
for step, example in enumerate(A):
torch.cuda.empty_cache()
UpperCamelCase = random.randint(0 , example.size(2) - context_len - 1)
UpperCamelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCamelCase = model(A , labels=A)
UpperCamelCase = True
if secondary_learner is not None:
UpperCamelCase = secondary_learner.forward(
torch.tensor(A , dtype=torch.long , device=A).unsqueeze(0))[0].item()
observed_qs.append(float(A))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCamelCase = -1
if predicted_q < threshold:
UpperCamelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
UpperCamelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCamelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCamelCase = compute_perplexity(A , A , A)
test_perps.append(A)
print('Test perplexity, step' , A , ':' , A)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , A)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def A_( ):
UpperCamelCase = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
parser.add_argument(
'--data_dir' , default=A , type=A , required=A , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=A , type=A , required=A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=A , default=A , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=A , default=A , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=A , type=A , required=A , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=A , type=A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=A , default=A , help='A seed for reproducible training.')
parser.add_argument(
'--context_len' , default=32 , type=A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=A , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=A , help='secondary model evaluation is triggered at eval_freq')
parser.add_argument('--max_steps' , default=1000 , type=A , help='To calculate training epochs')
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=A , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=A , help='batch size of training data of language model(gpt2) ')
parser.add_argument(
'--eval_interval' , default=10 , type=A , help=(
'decay the selectivity of our secondary learner filter from '
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=A , help='The number of examples split to be used as objective_set/test_data')
parser.add_argument(
'--min_len' , default=1026 , type=A , help='The minimum length of the article to be used as objective set')
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=A , help='number of epochs to train secondary learner')
parser.add_argument('--trim' , default=A , type=A , help='truncate the example if it exceeds context length')
parser.add_argument(
'--threshold' , default=1.0 , type=A , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=A , help='finetuned_model_name')
parser.add_argument(
'--recopy_model' , default=A , type=A , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=A , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
UpperCamelCase = joblib.load('data/IGF_values.jbl')
# Train secondary learner
UpperCamelCase = training_secondary_learner(
A , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
UpperCamelCase = GPTaLMHeadModel.from_pretrained('gpt2')
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase , UpperCamelCase = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=A)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
A , A , A , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=A , secondary_learner=A , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 3 |
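A hedged sketch of the filtering idea in the IGF loop above: a secondary model scores each candidate context, and only contexts whose predicted information gain clears the (decaying) threshold contribute to the language-model update. `predict_gain` is a stand-in for the secondary learner.

def filter_contexts(contexts, predict_gain, threshold):
    kept = []
    for ctx in contexts:
        if predict_gain(ctx) >= threshold:  # informative enough to train on
            kept.append(ctx)
    return kept

batch = filter_contexts(range(10), predict_gain=lambda c: c / 10, threshold=0.5)
print(batch)  # [5, 6, 7, 8, 9]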
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowercase__ = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !" ("I love camembert!")
lowercase__ = model(_lowercase )["last_hidden_state"]
lowercase__ = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
lowercase__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 655 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a :
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
return None
class a :
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
return None
class a ( unittest.TestCase ):
snake_case__ = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_snake_case , 'tf' , 12 , **_snake_case )
@require_torch
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_snake_case , 'pt' , 12 , **_snake_case )
@require_torch
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
from transformers import BertModel
lowerCAmelCase = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(_snake_case ) )
vocab_file.flush()
lowerCAmelCase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCAmelCase = BertModel(BertConfig(vocab_size=len(_snake_case ) ) )
model.save_pretrained(_snake_case )
self._test_export(_snake_case , 'pt' , 12 , _snake_case )
@require_tf
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCAmelCase = self._test_export(_snake_case , 'tf' , 12 , **_snake_case )
lowerCAmelCase = quantize(Path(_snake_case ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_snake_case ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCAmelCase = self._test_export(_snake_case , 'pt' , 12 , **_snake_case )
lowerCAmelCase = quantize(_snake_case )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_snake_case ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCAmelCase = Path(_snake_case ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case )
return path
except Exception as e:
self.fail(_snake_case )
@require_torch
@require_tokenizers
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
from transformers import BertModel
lowerCAmelCase = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCAmelCase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_snake_case , _snake_case , 'pt' )
@require_tf
@require_tokenizers
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
from transformers import TFBertModel
lowerCAmelCase = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCAmelCase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_snake_case , _snake_case , 'tf' )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = FeatureExtractionPipeline(_snake_case , _snake_case )
lowerCAmelCase = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = infer_shapes(_snake_case , _snake_case )
# Assert all variables are present
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , _snake_case )
self.assertSequenceEqual(variable_names[3:] , _snake_case )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCAmelCase = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCAmelCase ,lowerCAmelCase = ensure_valid_input(FuncContiguousArgs() , _snake_case , _snake_case )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(_snake_case ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(_snake_case ) , set(_snake_case ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(_snake_case , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCAmelCase ,lowerCAmelCase = ensure_valid_input(FuncNonContiguousArgs() , _snake_case , _snake_case )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(_snake_case ) , 1 )
self.assertEqual(len(_snake_case ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 4 |
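A sketch of what `ensure_valid_input` is tested to do above: reorder the tokenizer's outputs to match the positional order of the model's `forward()` signature, stopping at the first parameter the tokenizer did not produce. The `forward` stub is a stand-in signature, not a real model.

import inspect

def ensure_valid_input(model_forward, tokens, input_names):
    ordered_names, ordered_values = [], []
    for name in inspect.signature(model_forward).parameters:
        if name == "self":
            continue
        if name not in input_names:
            break  # a non-provided parameter ends the usable positional prefix
        ordered_names.append(name)
        ordered_values.append(tokens[name])
    return ordered_names, tuple(ordered_values)

def forward(input_ids, token_type_ids, attention_mask):  # stand-in signature
    pass

names, args = ensure_valid_input(
    forward,
    {"input_ids": [1], "token_type_ids": [0], "attention_mask": [1]},
    ["input_ids", "attention_mask", "token_type_ids"],
)
print(names, args)  # ['input_ids', 'token_type_ids', 'attention_mask'] ([1], [0], [1])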
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Pad binary_stream with arbitrary binary digits (zeros by default) so that
# its length becomes a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
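A compact re-implementation of the encoder above with readable names, cross-checked against the standard library; the two agree for arbitrary byte strings.

import base64

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

def base64_encode(data: bytes) -> bytes:
    bits = "".join(bin(b)[2:].zfill(8) for b in data)
    # Two '=' per leftover byte pair, then zero-fill to a multiple of 6 bits.
    pad = b"=" * ((6 - len(bits) % 6) // 2) if len(bits) % 6 else b""
    bits += "0" * ((6 - len(bits) % 6) % 6)
    return "".join(
        B64_CHARSET[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6)
    ).encode() + pad

for sample in (b"", b"f", b"fo", b"foo", b"Base64 round trip"):
    assert base64_encode(sample) == base64.b64encode(sample), sample
print("ok")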
'''simple docstring'''
def A (__lowerCamelCase :Any , __lowerCamelCase :str , __lowerCamelCase :List[Any] , __lowerCamelCase :int , __lowerCamelCase :Tuple , __lowerCamelCase :Any ):
if index == r:
for j in range(__lowerCamelCase ):
print(data[j] , end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_lowerCAmelCase = arr[i]
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 , __lowerCamelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :str , __lowerCamelCase :int ):
# A temporary array to store all combination one by one
_lowerCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , 0 , __lowerCamelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_lowercase = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 5 |
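The recursive include/exclude routine above enumerates the same r-element subsets as `itertools.combinations`; a quick equivalence check:

from itertools import combinations

arr, r = [10, 20, 30, 40, 50], 3
for combo in combinations(arr, r):
    print(*combo)
print(len(list(combinations(arr, r))))  # C(5, 3) = 10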
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Dict , _lowercase :TransformeraDModel , _lowercase :AutoencoderKL , _lowercase :KarrasDiffusionSchedulers , _lowercase :Optional[Dict[int, str]] = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
# create a imagenet -> id dictionary for easier use
lowercase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
lowercase__ = int(_lowercase )
lowercase__ = dict(sorted(self.labels.items() ) )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowercase__ = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
lowercase__ = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == "mps"
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.floataa if is_mps else torch.floataa
else:
lowercase__ = torch.intaa if is_mps else torch.intaa
lowercase__ = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 0 |
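A sketch of the classifier-free guidance step in the denoising loop above: the guided noise prediction extrapolates from the unconditional estimate toward the conditional one by the guidance scale.

import numpy as np

def classifier_free_guidance(cond_eps, uncond_eps, guidance_scale):
    # uncond + s * (cond - uncond); s = 1 recovers the conditional prediction.
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)

cond, uncond = np.array([1.0, 2.0]), np.array([0.5, 1.0])
print(classifier_free_guidance(cond, uncond, 4.0))  # [2.5 5. ]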
import sys
import turtle
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: tuple[float, float] , UpperCamelCase__: tuple[float, float] ):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: tuple[float, float] , UpperCamelCase__: tuple[float, float] , UpperCamelCase__: tuple[float, float] , UpperCamelCase__: int , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(UpperCamelCase__ , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , depth - 1 )
triangle(UpperCamelCase__ , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , depth - 1 )
triangle(UpperCamelCase__ , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
_lowerCamelCase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
_lowerCamelCase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 6 |
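The midpoint helper above lost its distinct parameter names in the mangling (both became the same identifier, which is a SyntaxError); the intended computation is just the coordinate-wise average:

def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)

print(get_mid((-175, -125), (0, 175)))  # (-87.5, 25.0)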
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)
    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
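# A minimal usage sketch of the priority order exercised above (illustrative,
# not part of the test suite): an explicit framework argument wins, then the
# contents of a local checkpoint, then environment availability, with PyTorch
# preferred when both frameworks are installed.
#
#   framework = FeaturesManager.determine_framework("bert-base-uncased")
#   # -> "pt" when torch is available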
| 655 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
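# Worked example: height = width = 768 with scale_factor = 8 gives
# 768 // 64 = 12 exactly, so the returned latent-friendly size is 12 * 8 = 96.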
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
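# prepare_image maps uint8 pixels from [0, 255] to [-1, 1] and returns a
# 1 x 3 x h x w float tensor, the value range the movq encoder expects.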
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'cuda:{gpu_id}')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f'cuda:{gpu_id}')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)['latents']
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 7 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = 'git_vision_model'
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = 'git'
    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
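# A minimal sketch of composing the two configs above (values illustrative):
#
#   vision_config = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
#   config = GitConfig(vision_config=vision_config.to_dict(), vocab_size=30522)
#   assert config.to_dict()["vision_config"]["hidden_size"] == 768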
| 655 | 0 |
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
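# Example: str_eval("9989") == 9 * 9 * 8 * 9 == 5832.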
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in the digit string `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 655 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
        '''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoForCausalLM''',
        '''GPTNeoForQuestionAnswering''',
        '''GPTNeoForSequenceClassification''',
        '''GPTNeoForTokenClassification''',
        '''GPTNeoModel''',
        '''GPTNeoPreTrainedModel''',
        '''load_tf_weights_in_gpt_neo''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        '''FlaxGPTNeoForCausalLM''',
        '''FlaxGPTNeoModel''',
        '''FlaxGPTNeoPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
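# With the lazy module installed in sys.modules, importing GPTNeoConfig from this
# package only loads the configuration file; the heavy torch/flax modeling
# modules are imported on first attribute access instead.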
| 9 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
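# Example: "backbone.0.body.layer1.0.conv1.weight" becomes
# "backbone.conv_encoder.model.layer1.0.conv1.weight".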
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
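# Example: a 1200 x 800 image converted with a "detection" checkpoint URL is
# scaled by 800 / 1200, i.e. resized to 800 x 533.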
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
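# Example: with RUN_SLOW=yes in the environment, parse_flag_from_env("RUN_SLOW",
# default=False) returns True; when the variable is unset, the default is used.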
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires faiss''')(test_case)
    return test_case
def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires regex''')(test_case)
    return test_case
def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires elasticsearch''')(test_case)
    return test_case
def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires sqlalchemy''')(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('''test requires PyTorch''')(test_case)
    return test_case
def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('''test requires TensorFlow''')(test_case)
    return test_case
def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('''test requires JAX''')(test_case)
    return test_case
def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('''test requires Pillow''')(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('''test requires transformers''')(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('''test requires tiktoken''')(test_case)
    else:
        return test_case
def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('''test requires spacy''')(test_case)
    else:
        return test_case
def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip('''test requires spacy''')(test_case)
        except OSError:
            return unittest.skip('''test requires spacy model \'{}\''''.format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''')(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''')(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('''test is slow''')(test_case)
    return test_case
def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('''test is local''')(test_case)
    return test_case
def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('''test is packaged''')(test_case)
    return test_case
def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('''test requires remote''')(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('''test'''):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate
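# Usage sketch (illustrative): apply several markers to every test method of a
# class at once.
#
#   @for_all_test_methods(slow, require_faiss)
#   class MyIndexTests(TestCase):
#       def test_build_index(self): ...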
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request
    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''') is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs['''timeout'''] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''', f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('''Offline mode is enabled.''', request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''', True):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''')
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('''500''') or str(err).startswith('''502'''):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='''stdout:''')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='''stderr:''')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id():
    worker = os.environ.get('''PYTEST_XDIST_WORKER''', '''gw0''')
    worker = re.sub(R'''^gw''', '''''', worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
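# Example: under pytest-xdist worker "gw3", pytest_xdist_worker_id() returns 3,
# so get_torch_dist_unique_port() yields 29503 and parallel workers do not
# collide on the torch.distributed port.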
| 10 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('''__'''):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""
    _active_patches = []
    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('''.''')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split('''.''')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('''.'''.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('''.'''.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['''__builtins__'''][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''')
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))
    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)
    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
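# Usage sketch (names illustrative): temporarily swap a function that `module`
# re-exported from os.path, covering both `module.os.path.join` and a direct
# `from os.path import join` alias inside `module`.
#
#   with patch_submodule(module, "os.path.join", mock_join):
#       module.function_that_joins_paths()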
| 11 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        '''simple docstring'''
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler)
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                # classifier-free guidance: push the prediction away from the unconditional
                # branch and toward the image-conditioned branch by `guidance_scale`
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 655 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
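# A minimal usage sketch (a tiny, hypothetical configuration -- not a released checkpoint):
# it shows that the attribute_map above aliases the common names onto Whisper's own fields.
#
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   assert config.hidden_size == 384  # resolved to d_model via attribute_map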
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self):
        '''simple docstring'''
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(
        self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
        sampling_rate=22050, time_duration=5.0, frequency=220,
    ):
        '''simple docstring'''
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework,
            sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency)
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self):
        '''simple docstring'''
        return 1e-3
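# A minimal usage sketch (hedged: assumes transformers' standard `OnnxConfig.from_model_config`
# entry point; treat the call as an assumption). The dynamic-axis spec returned by `inputs`
# is what an ONNX exporter would consume.
#
#   config = WhisperConfig()
#   onnx_config = WhisperOnnxConfig.from_model_config(config)
#   print(onnx_config.inputs)  # OrderedDict with "input_features" and "decoder_input_ids"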
| 12 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        '''simple docstring'''
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration(self):
        '''simple docstring'''
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
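# Example (sketch): get_pairs(("h", "e", "l", "l", "o"))
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}  (a set, so unordered)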
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub('(\')', r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')
        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                # greedily merge the lowest-ranked (most frequent) pair first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
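# A minimal usage sketch (hedged: assumes the released checkpoint referenced in the URL
# maps above is available):
#
#   from transformers import BlenderbotSmallTokenizer
#   tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tok("sample text")["input_ids"]
#   print(tok.convert_ids_to_tokens(ids))  # BPE pieces with "@@" continuation markers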
| 13 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        '''simple docstring'''
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self):
        '''simple docstring'''
        return True
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        '''simple docstring'''
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        '''simple docstring'''
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        '''simple docstring'''
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        '''simple docstring'''
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps)
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        '''simple docstring'''
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        '''simple docstring'''
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                " for the FlaxDDPMScheduler.")
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        '''simple docstring'''
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        '''simple docstring'''
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__( self :List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
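# A minimal usage sketch of the stateless flax API above (names as restored in this file;
# shapes and the model output are illustrative only):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   # each denoising step threads `state` through explicitly instead of mutating self:
#   # out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))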
| 655 | 0 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the _import_structure objects and the TYPE_CHECKING objects."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects."""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the transformers repo and raise an error if at least one is broken."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check that every submodule is registered in the main init of Transformers."""
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 14 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'}, )
    max_query_length: int = field(
        default=64, metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    n_best_size: int = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    lang_id: int = field(
        default=0, metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        }, )
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'})
class Split(Enum):
    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''')
    def __len__(self):
        '''simple docstring'''
        return len(self.features)
    def __getitem__(self, i):
        '''simple docstring'''
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
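# A minimal usage sketch (hedged: the data path and the tokenizer checkpoint are
# placeholders, and the SQuAD .json files must exist under data_dir):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
#   dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#   batch = dataset[0]  # dict of tensors ready for a QA model forward pass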
| 655 | 0 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """simple docstring"""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
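# With no noise model, an ideal simulation of this GHZ-style circuit yields only the
# all-zeros and all-ones bitstrings, split roughly 50/50 across the 1000 shots,
# e.g. {'000': ~500, '111': ~500} for 3 qubits.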
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
| 15 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        '''simple docstring'''
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.vocab)
    def get_vocab(self):
        '''simple docstring'''
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def preprocess_text(self, text):
        '''simple docstring'''
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        '''simple docstring'''
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def convert_ids_to_string(self, ids):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        '''simple docstring'''
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        '''simple docstring'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha(self, char):
        '''simple docstring'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct(self, char):
        '''simple docstring'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace(self, char):
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        '''simple docstring'''
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
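# A minimal usage sketch (hedged: assumes the checkpoint URLs listed in the maps above
# are live and that transformers ships ErnieMTokenizer):
#
#   tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#   tokens = tokenizer.tokenize("ERNIE-M handles multilingual text")
#   offsets = tokenizer.get_offset_mapping("ERNIE-M handles multilingual text")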
| 655 | 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'{bindir}/../../examples/pytorch/translation'):
    from run_translation import main  # noqa
set_seed(42)
MARIAN_MODEL = 'sshleifer/student_marian_en_ro_6_1'
MBART_TINY = 'sshleifer/tiny-mbart'
@require_torch
class TestTrainerExt(TestCasePlus):
    '''simple docstring'''
    def run_seq2seq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _snake_case ( self : List[str] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _snake_case ( self : str ):
        self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def _snake_case ( self : List[Any] ):
        self.run_seqaseq_quick(distributed=True )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : Union[str, Any] ):
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : Dict ):
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : str ):
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=__lowerCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def _snake_case ( self : Optional[int] ):
        self.run_seqaseq_quick(
            distributed=True , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=False )
@require_apex
@require_torch_gpu
def _snake_case ( self : Tuple ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
    def _snake_case ( self : Any , experiment_id : str ):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data["extra_args_str"] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data["n_matches"] )
@slow
def _snake_case ( self : Any ):
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _snake_case ( self : List[str] ):
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim : str ) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , "trainer_state.json" ) ).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
    def run_trainer( self , max_len : int , model_name : str , num_train_epochs : int , learning_rate : float = 3e-3 , optim : str = "adafactor" , distributed : bool = False , extra_args_str : str = None , eval_steps : int = 0 , predict_with_generate : bool = True , do_train : bool = True , do_eval : bool = True , do_predict : bool = True , n_gpus_to_use : int = None , ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs )}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps )}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        args_eval = f"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps )}\n        ".split()
        args_predict = "\n            --do_predict\n        ".split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys , "argv" , testargs ):
                main()
        return output_dir
| 16 |
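# A rough sketch of the command list the distributed branch of run_trainer
# assembles above; GPU count, port, and paths are illustrative placeholders,
# not values from a real run.
import sys

n_gpus_to_use, master_port = 2, 29500  # hypothetical values
cmd = [
    sys.executable,
    "-m", "torch.distributed.run",
    f"--nproc_per_node={n_gpus_to_use}",
    f"--master_port={master_port}",
    "examples/pytorch/translation/run_translation.py",
    "--model_name_or_path", "sshleifer/tiny-mbart",
    "--do_train",
    "--output_dir", "/tmp/translation_out",
]
print(" ".join(cmd))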
def sum_digits(num ):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n = 100 ):
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 655 | 0 |
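# Sanity check for the Project Euler 65 solution above (digit sum of the
# numerator of a convergent of e). Names follow the reconstruction above;
# the 10th convergent of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] is 1457/536.
from fractions import Fraction

terms = [2] + [2 * (k // 3 + 1) if k % 3 == 1 else 1 for k in range(9)]
value = Fraction(terms[-1])
for t in reversed(terms[:-1]):
    value = t + 1 / value
assert value.numerator == 1457
assert sum_digits(value.numerator) == solution(10) == 17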
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( matrix : list[list[int]] ) -> int:
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
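# Usage sketch for the in-place minimum-path-sum DP above (the obfuscated
# function name is kept as-is); the classic 3x3 grid's cheapest monotone
# path 1 -> 3 -> 1 -> 1 -> 1 costs 7.
grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
# The function mutates its argument, so pass a copy if the grid is reused.
assert __SCREAMING_SNAKE_CASE([row[:] for row in grid]) == 7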
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'AutoTokenizer'
__lowerCamelCase = ['tokenizer']
__lowerCamelCase = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
    def __init__( self :Dict , tokenizer :List[str] , speaker_embeddings :List[Any]=None ):
        '''simple docstring'''
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(_lowercase , _lowercase )}` does not exist, so no preloaded speaker embeddings will be
                    used - make sure to provide a correct path to the JSON dictionary if wanted, otherwise set
                    `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
    def UpperCAmelCase ( self :Optional[int] , voice_preset :str = None , **kwargs :List[Any] ):
        '''simple docstring'''
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , None ) , cache_dir=kwargs.pop("cache_dir" , None ) , force_download=kwargs.pop("force_download" , False ) , proxies=kwargs.pop("proxies" , None ) , resume_download=kwargs.pop("resume_download" , False ) , local_files_only=kwargs.pop("local_files_only" , False ) , use_auth_token=kwargs.pop("use_auth_token" , None ) , revision=kwargs.pop("revision" , None ) , )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist
                    - no preloaded voice preset will be used. Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            voice_preset_dict[key] = np.load(path )
return voice_preset_dict
def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
| 655 | 0 |
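# Assuming the class above is transformers' BarkProcessor, typical usage
# looks like this; model id and voice preset are the published defaults,
# shown here purely for illustration.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
# voice_preset selects one of the preloaded speaker embeddings validated above
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)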
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa : bytes ):
    '''simple docstring'''
    if len(string_aa ) != 32:
        raise ValueError("Input must be of length 32" )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative" )
    hex_rep = format(i , "08x" )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
    return little_endian_hex
def preprocess(message : bytes ):
    '''simple docstring'''
    bit_string = b""
    for char in message:
        bit_string += format(char , "08b" ).encode("utf-8" )
    start_len = format(len(bit_string ) , "064b" ).encode("utf-8" )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words(bit_string : bytes ):
    '''simple docstring'''
    if len(bit_string ) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512" )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa(i : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative" )
    i_str = format(i , "032b" )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa(a : int , b : int ):
    '''simple docstring'''
    return (a + b) % 2**32
def left_rotate_aa(i : int , shift : int ):
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative" )
    if shift < 0:
        raise ValueError("Shift must be non-negative" )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message : bytes ):
    '''simple docstring'''
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0X67452301
    ba = 0XEFCDAB89
    ca = 0X98BADCFE
    da = 0X10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
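# With the reconstructed names above (md5_me and its helpers), the
# implementation can be cross-checked against the standard library:
import hashlib

for message in (b"", b"abc", b"The quick brown fox jumps over the lazy dog"):
    expected = hashlib.md5(message).hexdigest().encode("utf-8")
    assert md5_me(message) == expected, message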
import math
import random
def sigmoid_function(value , deriv = False ):
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected , number_propagations ):
    # Random weight in [-1, 1)
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("""Expected value: """))
_snake_case = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 655 | 0 |
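# Convergence check for the single-weight network above, mirroring the
# upstream doctest; the random initial weight washes out over enough
# iterations, so the tolerance is deliberately loose.
res = forward_propagation(expected=32, number_propagations=450_000)
assert 31 < res < 33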
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1_28 , __a=32 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = NezhaModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = True
_UpperCamelCase = NezhaModel(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , )
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = NezhaForMaskedLM(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = NezhaForNextSentencePrediction(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = NezhaForPreTraining(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = NezhaForQuestionAnswering(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = NezhaForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = NezhaForTokenClassification(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = NezhaForMultipleChoice(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False) -> str:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a)
@slow
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = NezhaModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@slow
@require_torch_gpu
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''bert.pt'''))
                loaded = torch.jit.load(os.path.join(tmp , '''bert.pt''') , map_location=torch_device)
                loaded(inputs_dict['''input_ids'''].to(torch_device) , inputs_dict['''attention_mask'''].to(torch_device))
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''')
_UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]])
_UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCamelCase = model(__a , attention_mask=__a)[0]
_UpperCamelCase = torch.Size((1, 6, 7_68))
self.assertEqual(output.shape , __a)
_UpperCamelCase = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''')
_UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]])
_UpperCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCamelCase = model(__a , attention_mask=__a)[0]
_UpperCamelCase = torch.Size((1, 6, 2_11_28))
self.assertEqual(output.shape , __a)
_UpperCamelCase = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
| 19 |
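# A compact inference sketch matching the integration tests above. The
# checkpoint name comes from the tests; loading the tokenizer via
# AutoTokenizer is an assumption (the checkpoint ships a BERT-style vocab).
import torch
from transformers import AutoTokenizer, NezhaModel

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
inputs = tokenizer("你好,世界", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])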
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'van'
    def __init__( self , image_size=2_24 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 1_28, 3_20, 5_12] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-6 , layer_scale_init_value=1e-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 655 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase_ (lowercase__ ):
    def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
def __UpperCamelCase ( self) -> List[Any]:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split)
| 20 |
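# Assuming the reader above is datasets' SparkDatasetReader (it lives in
# datasets.io.spark rather than the top-level namespace; the import path may
# vary by datasets version), a minimal end-to-end sketch:
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
# from datasets.io.spark import SparkDatasetReader
# ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()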
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( enum.Enum ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , postprocess_params = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def UpperCAmelCase ( self :Tuple , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        '''simple docstring'''
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
            preprocess_params["handle_long_generation"] = handle_long_generation
preprocess_params.update(_lowercase )
        forward_params = generate_kwargs
        postprocess_params = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
    def UpperCAmelCase ( self :str , model_inputs , **generate_kwargs ):
        '''simple docstring'''
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask" , None )
# Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def UpperCAmelCase ( self :Any , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record )
        return records
| 655 | 0 |
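# The class above backs transformers' text-generation pipeline; the usual
# entry point is the pipeline factory:
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator("Hello, I'm a language model,", max_new_tokens=20)
print(out[0]["generated_text"])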
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = CanineTokenizer
UpperCamelCase = False
def A__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
__magic_name__ : Optional[int] =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def A__ ( self :Optional[int] , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : Any =self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
__magic_name__ : Optional[int] =10_24
return tokenizer
@require_torch
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : str =self.canine_tokenizer
__magic_name__ : Any =["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__magic_name__ : Optional[int] =[5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__magic_name__ : Dict =tokenizer(__snake_case , padding=__snake_case , return_tensors="""pt""" )
self.assertIsInstance(__snake_case , __snake_case )
__magic_name__ : Optional[int] =list(batch.input_ids.numpy()[0] )
self.assertListEqual(__snake_case , __snake_case )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.canine_tokenizer
__magic_name__ : Optional[Any] =["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__magic_name__ : int =tokenizer(__snake_case , padding=__snake_case , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __snake_case )
self.assertIn("""attention_mask""" , __snake_case )
self.assertIn("""token_type_ids""" , __snake_case )
@require_torch
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Dict =self.canine_tokenizer
__magic_name__ : List[Any] =[
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__magic_name__ : Any =tokenizer(
text_target=__snake_case , max_length=32 , padding="""max_length""" , truncation=__snake_case , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : Tuple =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Any =tempfile.mkdtemp()
__magic_name__ : Union[str, Any] =""" He is very happy, UNwant\u00E9d,running"""
__magic_name__ : List[str] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__magic_name__ : Optional[Any] =tokenizer.__class__.from_pretrained(__snake_case )
__magic_name__ : str =after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
shutil.rmtree(__snake_case )
__magic_name__ : int =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str =tempfile.mkdtemp()
__magic_name__ : Optional[int] =""" He is very happy, UNwant\u00E9d,running"""
__magic_name__ : Optional[Any] =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__magic_name__ : Optional[int] =chr(0xE_0_0_7 )
additional_special_tokens.append(__snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__magic_name__ : List[Any] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
__magic_name__ : Optional[Any] =tokenizer.__class__.from_pretrained(__snake_case )
__magic_name__ : List[Any] =after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
self.assertIn(__snake_case , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : Optional[int] =tokenizer.__class__.from_pretrained(__snake_case , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__snake_case )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
                input_text , ids = self.get_clean_sequence(tokenizer )
# a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE_0_0_5
                special_token = chr(SPECIAL_TOKEN )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : Optional[int] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertEqual(len(__snake_case ) , 1 )
__magic_name__ : Any =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__snake_case )
__magic_name__ : Union[str, Any] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__magic_name__ : Optional[int] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__magic_name__ : Union[str, Any] =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertEqual(__snake_case , input_encoded + special_token_id )
__magic_name__ : List[str] =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
self.assertTrue(special_token not in decoded )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__magic_name__ : Tuple =chr(0xE_0_0_5 )
__magic_name__ : Union[str, Any] =chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__snake_case )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__magic_name__ : List[Any] =tokenizer.tokenize(__snake_case )
__magic_name__ : Union[str, Any] =tokenizer.tokenize(__snake_case )
self.assertEqual(len(__snake_case ) , 1 )
self.assertEqual(len(__snake_case ) , 1 )
self.assertEqual(token_a[0] , __snake_case )
self.assertEqual(token_a[0] , __snake_case )
@require_tokenizers
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : Dict =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__magic_name__ : Dict =0xE_0_0_6
__magic_name__ : Tuple =chr(__snake_case )
__magic_name__ : str =AddedToken(__snake_case , lstrip=__snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__snake_case )
tokenizer.from_pretrained(__snake_case )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : str =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ : List[Any] =json.load(__snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ : str =json.load(__snake_case )
# a special token for Canine can be defined as follows:
__magic_name__ : int =0xE_0_0_6
__magic_name__ : List[str] =chr(__snake_case )
__magic_name__ : Union[str, Any] =[new_token_a]
__magic_name__ : List[Any] =[new_token_a]
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : Union[str, Any] =tokenizer_class.from_pretrained(__snake_case , extra_ids=0 )
self.assertIn(__snake_case , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__magic_name__ : str =0xE_0_0_7
__magic_name__ : Optional[int] =chr(__snake_case )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : List[Any] =[AddedToken(__snake_case , lstrip=__snake_case )]
__magic_name__ : str =tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , extra_ids=0 )
self.assertIn(__snake_case , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : List[str] =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__magic_name__ : Dict ="""hello world"""
if self.space_between_special_tokens:
__magic_name__ : Dict ="""[CLS] hello world [SEP]"""
else:
__magic_name__ : int =input
__magic_name__ : Any =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__magic_name__ : List[Any] =tokenizer.decode(__snake_case , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__snake_case , [output, output.lower()] )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : str =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__magic_name__ : str =[
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__magic_name__ : Union[str, Any] ="""a"""
__magic_name__ : int =ord(__snake_case )
for attr in attributes_list:
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [] )
__magic_name__ : Optional[int] =0xE_0_0_6
__magic_name__ : Any =chr(__snake_case )
setattr(__snake_case , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def A__ ( self :int ):
'''simple docstring'''
pass
def A__ ( self :str ):
'''simple docstring'''
pass
def A__ ( self :str ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Any ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
pass
def A__ ( self :int ):
'''simple docstring'''
pass
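# A minimal standalone illustration (not part of the test suite above) of why
# these tests build special tokens with chr(): CANINE tokenizes raw Unicode
# code points, so custom special tokens are drawn from the Private Use Area
# (U+E000-U+F8FF), where they cannot collide with real text. Names below are
# illustrative only.
def _private_use_token_demo():
    code_point = 0xE005  # assumption: any unused PUA code point behaves the same
    token = chr(code_point)
    assert ord(token) == code_point
    assert "\ue000" <= token <= "\uf8ff"  # inside the BMP Private Use Area
    return token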
| 21 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(rows * cols * num_images )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
lowercase__ = data.reshape(__magic_name__ , __magic_name__ , __magic_name__ , 1 )
return data
@deprecated(__magic_name__ , "Please use tf.one_hot on tensors." )
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = labels_dense.shape[0]
lowercase__ = numpy.arange(__magic_name__ ) * num_classes
lowercase__ = numpy.zeros((num_labels, num_classes) )
lowercase__ = 1
return labels_one_hot
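# For reference, a tiny self-contained demo of the flat-index trick used by
# _dense_to_one_hot above: offsetting each row by `row * num_classes` turns
# 2-D one-hot positions into indices of the flattened array.
# (Illustrative only; these values are not from the original module.)
# >>> dense = numpy.array([1, 0, 3]); num_classes = 4
# >>> offsets = numpy.arange(3) * num_classes           # [0, 4, 8]
# >>> flat_positions = offsets + dense                  # [1, 4, 11]
# >>> one_hot = numpy.zeros((3, 4)); one_hot.flat[flat_positions] = 1
# >>> one_hot[2]
# array([0., 0., 0., 1.])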
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(__magic_name__ )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__magic_name__ , __magic_name__ )
return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f'''{len(train_images)}. Received: {validation_size}.'''
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
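# A minimal usage sketch (assuming a writable "MNIST_data/" directory and
# network access; not part of the original module):
#
#   mnist = read_data_sets("MNIST_data/", one_hot=True, validation_size=5000)
#   images, labels = mnist.train.next_batch(128)   # shuffled 128-example batch
#   print(images.shape)  # (128, 784) after reshape, scaled to [0.0, 1.0]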
| 655 | 0 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''' )
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']' )
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''' )
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']' )
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
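# A minimal usage sketch (datasets d1/d2 are hypothetical placeholders):
#
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   # Sample from d2 twice as often; stop once every dataset is exhausted.
#   mixed = interleave_datasets([d1, d2], probabilities=[1 / 3, 2 / 3],
#                               seed=42, stopping_strategy="all_exhausted")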
| 22 |
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None
    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''')
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node):
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
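# An iterative equivalent of print_reverse, sketched here because the
# recursive version above uses one stack frame per node and can hit Python's
# default recursion limit (~1000 frames) on long lists. Assumes the same
# Node class; this helper is not part of the original module.
def print_reverse_iterative(head_node):
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())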
| 655 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
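# Illustration of what create_rename_keys produces for one checkpoint entry
# (the key below is a representative example, not taken from a real checkpoint):
#
#   >>> sd = {"network.0.1.dwconv.weight": 0}
#   >>> create_rename_keys(sd)
#   [('network.0.1.dwconv.weight',
#     'swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight')]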
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
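# Example invocation (assuming the script is saved as
# convert_swiftformer_original_to_hf.py; paths below are placeholders):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth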
| 23 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
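# bin_exp_mod is imported above but not shown; a minimal stand-in with the
# assumed signature bin_exp_mod(a, d, n) -> (a ** d) % n, computed by binary
# (square-and-multiply) exponentiation:
def _bin_exp_mod_sketch(a, d, n):
    res = 1
    a %= n
    while d > 0:
        if d & 1:
            res = res * a % n
        a = a * a % n
        d >>= 1
    return res  # equivalent to Python's built-in pow(a, d, n)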
if __name__ == "__main__":
n = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCAmelCase_ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ) -> int:
'''simple docstring'''
__snake_case = size if size is not None else {'''height''': 20, '''width''': 20}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = size
__snake_case = do_normalize
__snake_case = do_convert_rgb
__snake_case = [512, 1024, 2048, 4096]
__snake_case = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__snake_case = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
__lowercase : str = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = PixaStructImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.image_processor_tester.prepare_dummy_image()
__snake_case = self.image_processing_class(**self.image_processor_dict )
__snake_case = 2048
__snake_case = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__snake_case = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__snake_case = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
__snake_case = '''Hello'''
__snake_case = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
__snake_case = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
__lowercase : int = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = PixaStructImageProcessingTester(self , num_channels=4 )
__snake_case = 3
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_convert_rgb''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__snake_case = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__snake_case = image_processor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , max_patches=__SCREAMING_SNAKE_CASE ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 24 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
| 655 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed")
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored.")
issue.add_to_labels("stale")
if __name__ == "__main__":
main() | 25 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 655 | 0 |
'''simple docstring'''
from __future__ import annotations
class _A :
def __init__( self : Optional[int] , __magic_name__ : list[list[int]] ) -> str:
"""simple docstring"""
__snake_case : str = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(__magic_name__ ) != 0:
__snake_case : List[str] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__magic_name__ ) != cols:
raise error
for value in row:
if not isinstance(__magic_name__ , (int, float) ):
raise error
__snake_case : Any = rows
else:
__snake_case : List[Any] = []
def lowercase__ ( self : int ) -> list[list[int]]:
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return len(self.rows )
@property
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
return len(self.rows[0] )
@property
def lowercase__ ( self : str ) -> tuple[int, int]:
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def lowercase__ ( self : Optional[int] ) -> bool:
"""simple docstring"""
return self.order[0] == self.order[1]
def lowercase__ ( self : List[str] ) -> Matrix:
"""simple docstring"""
__snake_case : List[str] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__magic_name__ )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase__ ( self : Any ) -> bool:
"""simple docstring"""
return bool(self.determinant() )
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__magic_name__ ).determinant()
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(__magic_name__ , __magic_name__ )
return -1 * self.get_minor(__magic_name__ , __magic_name__ )
def lowercase__ ( self : int ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[self.get_minor(__magic_name__ , __magic_name__ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase__ ( self : List[str] ) -> Matrix:
"""simple docstring"""
__snake_case : List[str] = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__magic_name__ )
def lowercase__ ( self : Any ) -> Matrix:
"""simple docstring"""
__snake_case : List[Any] = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self.rows )
def __str__( self : Optional[Any] ) -> str:
"""simple docstring"""
if self.num_rows == 0:
return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(__magic_name__ ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def lowercase__ ( self : Union[str, Any] , __magic_name__ : list[int] , __magic_name__ : int | None = None ) -> None:
"""simple docstring"""
__snake_case : Tuple = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(__magic_name__ , __magic_name__ ):
raise type_error
for value in row:
if not isinstance(__magic_name__ , (int, float) ):
raise type_error
if len(__magic_name__ ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(__magic_name__ )
else:
__snake_case : Optional[Any] = self.rows[0:position] + [row] + self.rows[position:]
def lowercase__ ( self : List[Any] , __magic_name__ : list[int] , __magic_name__ : int | None = None ) -> None:
"""simple docstring"""
__snake_case : Tuple = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(__magic_name__ , __magic_name__ ):
raise type_error
for value in column:
if not isinstance(__magic_name__ , (int, float) ):
raise type_error
if len(__magic_name__ ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
__snake_case : str = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__snake_case : List[Any] = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Tuple , __magic_name__ : object ) -> bool:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : List[Any] , __magic_name__ : object ) -> bool:
"""simple docstring"""
return not self == other
def __neg__( self : Dict ) -> Matrix:
"""simple docstring"""
return self * -1
def __add__( self : List[Any] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Optional[Any] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Union[str, Any] , __magic_name__ : Matrix | int | float ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__magic_name__ , __magic_name__ ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(__magic_name__ , __magic_name__ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self : Tuple , __magic_name__ : int ) -> Matrix:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
__snake_case : int = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase__ ( cls : Dict , __magic_name__ : list[int] , __magic_name__ : list[int] ) -> int:
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(__magic_name__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def base64_encode(data):
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)).encode()
        + padding
    )
def base64_decode(encoded_data):
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
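# A quick round-trip check against the standard library (illustrative only):
#
#   >>> import base64 as stdlib_b64
#   >>> payload = b"Hello, World!"
#   >>> base64_encode(payload) == stdlib_b64.b64encode(payload)
#   True
#   >>> base64_decode(base64_encode(payload)) == payload
#   True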
| 655 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the requested Bark checkpoint."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download a checkpoint file from the Hub into the local Bark cache."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a Bark sub-model checkpoint and port it into its HF counterpart."""
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
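# A minimal, reusable sketch of the key-remapping pattern applied above when
# porting a checkpoint: strip a compile-time prefix, then apply substring
# renames. The helper name is illustrative; the real mapping is new_layer_name_dict.
def remap_state_dict(state_dict: dict, rename: dict, prefix: str = "_orig_mod.") -> dict:
    remapped = {}
    for key, value in state_dict.items():
        if key.startswith(prefix):
            key = key[len(prefix) :]
        for old, new in rename.items():
            key = key.replace(old, new)
        remapped[key] = value
    return remapped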
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, verify it against the original, and save it."""
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble the three converted sub-models plus Encodec into one BarkModel."""
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
__A : Optional[int] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
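# Example invocation of this conversion script (the script and output path
# names below are illustrative, not from the source):
#   python convert_suno_to_hf.py text ./bark_text_hf --is_small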
| 27 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Dict , _lowercase :TransformeraDModel , _lowercase :AutoencoderKL , _lowercase :KarrasDiffusionSchedulers , _lowercase :Optional[Dict[int, str]] = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
# create an imagenet -> id dictionary for easier use
lowercase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
lowercase__ = int(_lowercase )
lowercase__ = dict(sorted(self.labels.items() ) )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowercase__ = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
lowercase__ = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == "mps"
if isinstance(timesteps , float ):
lowercase__ = torch.float32 if is_mps else torch.float64
else:
lowercase__ = torch.int32 if is_mps else torch.int64
lowercase__ = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
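# A minimal sketch of the classifier-free guidance blend performed in the loop
# above: the batch is laid out as [conditional | unconditional] halves and the
# two noise predictions are mixed with the guidance scale (names illustrative).
import torch

def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # conditional half first, matching the [class_labels, class_null] concatenation above
    cond_eps, uncond_eps = noise_pred.chunk(2, dim=0)
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)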
| 655 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Apply the sigmoid function 1 / (1 + e^-x) element-wise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 28 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( lowercase_ ):
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = SMALL_MODEL_IDENTIFIER
lowercase__ = "pt"
lowercase__ = "tf"
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowercase )
def UpperCAmelCase ( self :Tuple , _lowercase :int ):
'''simple docstring'''
lowercase__ = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowercase )
model_tf.save_pretrained(_lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
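# Hedged sketch of the availability-patching pattern these tests rely on (the
# patch target appears above; the return value and model id are illustrative):
#   with patch("transformers.onnx.features.is_torch_available", MagicMock(return_value=False)):
#       framework = FeaturesManager.determine_framework(model_id)  # falls back to TensorFlow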
| 655 | 0 |
"""simple docstring"""
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    """XOR the ciphertext with a repeating key; return None on an invalid char."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every 3-letter lowercase key and keep decodings made of valid chars."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate decodings that contain a given common word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Return the sum of the ASCII values of the decrypted text."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
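# A tiny illustration of the XOR property the solver above relies on: applying
# the same repeating key twice restores the plaintext (values illustrative).
_plain = [ord(c) for c in "cat"]
_key = [ord(c) for c in "key"]
_cipher = [p ^ k for p, k in zip(_plain, _key)]
assert [c ^ k for c, k in zip(_cipher, _key)] == _plain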
| 29 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'git_vision_model'
def __init__( self :Dict , _lowercase :Dict=7_68 , _lowercase :Dict=30_72 , _lowercase :Tuple=12 , _lowercase :List[str]=12 , _lowercase :Tuple=3 , _lowercase :Dict=2_24 , _lowercase :Tuple=16 , _lowercase :Optional[int]="quick_gelu" , _lowercase :Union[str, Any]=1e-5 , _lowercase :Tuple=0.0 , _lowercase :Tuple=0.02 , **_lowercase :Optional[Any] , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
def UpperCAmelCase ( cls :List[str] , _lowercase :Union[str, os.PathLike] , **_lowercase :Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
lowercase__ , lowercase__ = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
lowercase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'git'
def __init__( self :Union[str, Any] , _lowercase :Dict=None , _lowercase :List[str]=3_05_22 , _lowercase :Tuple=7_68 , _lowercase :Any=6 , _lowercase :Dict=12 , _lowercase :Any=30_72 , _lowercase :List[Any]="gelu" , _lowercase :Tuple=0.1 , _lowercase :Optional[int]=0.1 , _lowercase :Optional[Any]=10_24 , _lowercase :Any=0.02 , _lowercase :int=1e-12 , _lowercase :List[Any]=0 , _lowercase :int="absolute" , _lowercase :List[str]=True , _lowercase :Any=False , _lowercase :int=1_01 , _lowercase :str=1_02 , _lowercase :Dict=None , **_lowercase :List[str] , ):
'''simple docstring'''
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , pad_token_id=_lowercase , **_lowercase )
if vision_config is None:
lowercase__ = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
lowercase__ = GitVisionConfig(**_lowercase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = tie_word_embeddings
lowercase__ = num_image_with_embedding
lowercase__ = bos_token_id
lowercase__ = eos_token_id
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.__class__.model_type
return output
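# Hedged usage sketch (assuming the upstream class names GitVisionConfig and
# GitConfig for the two configs above; the kwargs are illustrative defaults):
#   vision = GitVisionConfig(image_size=224, patch_size=16)
#   config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)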
| 655 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
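# (Hedged) The ratio_char_token column recorded by tokenize() above is typically
# used downstream to filter degenerate files, e.g.:
#   ds = ds.filter(lambda ex: ex["ratio_char_token"] > 1.5)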
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 30 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
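# Hedged sketch of the PT<->TF cross-loading round trip these tests exercise
# (the checkpoint id and local path below are illustrative):
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#   pt_model = AutoModel.from_pretrained("path/to/tf_checkpoint", from_tf=True)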
| 655 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCamelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
lowerCamelCase__ : Any = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
lowerCamelCase__ : str = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCamelCase__ : List[Any] = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
#                  stream_logs=True)
| 31 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
def _A ( __magic_name__ ):
lowercase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase__ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
lowercase__ = value
else:
lowercase__ = value
return new_state_dict
def _A ( __magic_name__ ):
lowercase__ = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowercase__ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowercase__ = in_proj_weight_cross_attn[:256, :]
lowercase__ = in_proj_bias_cross_attn[:256]
lowercase__ = in_proj_weight_cross_attn[256:512, :]
lowercase__ = in_proj_bias_cross_attn[256:512]
lowercase__ = in_proj_weight_cross_attn[-256:, :]
lowercase__ = in_proj_bias_cross_attn[-256:]
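# A minimal sketch of the fused-attention split performed above: PyTorch's
# MultiheadAttention stores Q, K, V as one (3*d, d) in_proj matrix, while the
# HF model keeps separate q/k/v projections (d = 256 matches the slices above).
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)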
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ , lowercase__ = image.size
lowercase__ = max(__magic_name__ , __magic_name__ )
lowercase__ = 800 if "detection" in checkpoint_url else 1000
lowercase__ = target_max_size / current_max_size
lowercase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _A ( __magic_name__ ):
lowercase__ = F.to_tensor(__magic_name__ )
lowercase__ = F.normalize(__magic_name__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
logger.info("Converting model..." )
# load original state dict
lowercase__ = torch.hub.load_state_dict_from_url(__magic_name__ , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__ = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
# create HuggingFace model and load state dict
lowercase__ = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowercase__ = 15
lowercase__ = 2
lowercase__ = {0: "table", 1: "table rotated"}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
else:
lowercase__ = 125
lowercase__ = 6
lowercase__ = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
lowercase__ = TableTransformerForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# verify our conversion
lowercase__ = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
lowercase__ = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=__magic_name__ )
lowercase__ = Image.open(__magic_name__ ).convert("RGB" )
lowercase__ = normalize(resize(__magic_name__ , __magic_name__ ) ).unsqueeze(0 )
lowercase__ = model(__magic_name__ )
if "detection" in checkpoint_url:
lowercase__ = (1, 15, 3)
lowercase__ = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
lowercase__ = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
lowercase__ = (1, 125, 7)
lowercase__ = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
lowercase__ = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
lowercase__ = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(__magic_name__ )
image_processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : str = StableDiffusionLatentUpscalePipeline
__A : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
__A : Dict = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
__A : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__A : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__A : Optional[int] = frozenset([] )
__A : str = True
@property
def UpperCamelCase( self ):
_UpperCAmelCase = 1
_UpperCAmelCase = 4
_UpperCAmelCase = (16, 16)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCamelCase )
return image
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_UpperCamelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_UpperCamelCase , only_cross_attention=_UpperCamelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
_UpperCAmelCase = EulerDiscreteScheduler(prediction_type='''sample''' )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
_UpperCAmelCase = CLIPTextModel(_UpperCamelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu'''
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
_UpperCAmelCase = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCamelCase , 1e-3 )
def UpperCamelCase( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def UpperCamelCase( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def UpperCamelCase( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def UpperCamelCase( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def UpperCamelCase( self ):
super().test_save_load_local(expected_max_difference=3e-3 )
def UpperCamelCase( self ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def UpperCamelCase( self ):
_UpperCAmelCase = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_UpperCamelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = 2
_UpperCAmelCase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
_UpperCAmelCase = getattr(_UpperCamelCase , scheduler_enum.name )
_UpperCAmelCase = scheduler_cls.from_config(pipe.scheduler.config )
_UpperCAmelCase = pipe(**_UpperCamelCase )[0]
outputs.append(_UpperCamelCase )
assert check_same_shape(_UpperCamelCase )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
_UpperCAmelCase = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
_UpperCAmelCase = pipe(_UpperCamelCase , generator=_UpperCamelCase , output_type='''latent''' ).images
_UpperCAmelCase = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCamelCase , output_type='''np''' , ).images[0]
_UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
_UpperCAmelCase = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
_UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
_UpperCAmelCase = upscaler(
prompt=_UpperCamelCase , image=_UpperCamelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCamelCase , output_type='''np''' , ).images[0]
_UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max()) < 5e-2
| 32 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
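# Note: _LazyModule defers the real import until an attribute (here ByT5Tokenizer)
# is first accessed, keeping `import transformers` cheap; the TYPE_CHECKING branch
# gives static analyzers an eager import to resolve instead.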
| 655 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:Optional[Any] , _a:Any=2 , _a:Dict=True , _a:List[Any]=False , _a:List[str]=10 , _a:Union[str, Any]=3 , _a:Tuple=32 * 8 , _a:Dict=32 * 8 , _a:List[str]=4 , _a:Union[str, Any]=64 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = is_training
snake_case__ = use_auxiliary_loss
snake_case__ = num_queries
snake_case__ = num_channels
snake_case__ = min_size
snake_case__ = max_size
snake_case__ = num_labels
snake_case__ = hidden_dim
snake_case__ = hidden_dim
def prepare_config_and_inputs(self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
torch_device )
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
mask_labels = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
).float()
class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def get_config(self ):
config = Mask2FormerConfig(
hidden_size=self.hidden_dim , )
config.num_queries = self.num_queries
config.num_labels = self.num_labels
config.backbone_config.depths = [1, 1, 1, 1]
config.backbone_config.num_channels = self.num_channels
config.encoder_feedforward_dim = 64
config.dim_feedforward = 128
config.hidden_dim = self.hidden_dim
config.mask_feature_size = self.hidden_dim
config.feature_size = self.hidden_dim
return config
def prepare_config_and_inputs_for_common(self ):
config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
inputs_dict = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def check_output_hidden_state(self , output , config ):
encoder_hidden_states = output.encoder_hidden_states
pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(transformer_decoder_hidden_states ) , config.decoder_layers )
def create_and_check_mask2former_model(self , config , pixel_values , pixel_mask , output_hidden_states=False ):
with torch.no_grad():
model = Mask2FormerModel(config=config )
model.to(torch_device )
model.eval()
output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
output = model(pixel_values , output_hidden_states=True )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(output , config )
def create_and_check_mask2former_instance_segmentation_head_model(
self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
model = Mask2FormerForUniversalSegmentation(config=config )
model.to(torch_device )
model.eval()
def comm_check_on_output(result ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
result = model(pixel_values )
comm_check_on_output(result )
result = model(
pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
comm_check_on_output(result )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class Mask2FormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
pipeline_model_mapping = {'''feature-extraction''': Mask2FormerModel} if is_torch_available() else {}
is_encoder_decoder = False
test_pruning = False
test_head_masking = False
test_missing_keys = False
def setUp(self ):
self.model_tester = Mask2FormerModelTester(self )
self.config_tester = ConfigTester(self , config_class=Mask2FormerConfig , has_text_modality=False )
def test_config(self ):
self.config_tester.run_common_tests()
def test_mask2former_model(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_mask2former_model(config , **inputs_dict , output_hidden_states=False )
def test_mask2former_instance_segmentation_head_model(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def test_inputs_embeds(self ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def test_model_common_attributes(self ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def test_generate_without_input_ids(self ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def test_resize_tokens_embeddings(self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def test_multi_gpu_data_parallel_forward(self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def test_model_outputs_equivalence(self ):
pass
def test_forward_signature(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
def test_model_from_pretrained(self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
model = Mask2FormerModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def test_model_with_labels(self ):
size = (self.model_tester.min_size,) * 2
inputs = {
'''pixel_values''': torch.randn((2, 3, *size) , device=torch_device ),
'''mask_labels''': torch.randn((2, 10, *size) , device=torch_device ),
'''class_labels''': torch.zeros(2 , 10 , device=torch_device ).long(),
}
config = self.model_tester.get_config()
model = Mask2FormerForUniversalSegmentation(config ).to(torch_device )
outputs = model(**inputs )
self.assertTrue(outputs.loss is not None )
def test_hidden_states_output(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_mask2former_model(config , **inputs_dict , output_hidden_states=True )
def test_attention_outputs(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config ).to(torch_device )
outputs = model(**inputs_dict , output_attentions=True )
self.assertTrue(outputs.attentions is not None )
def test_training(self ):
if not self.model_tester.is_training:
return
model_class = self.all_model_classes[1]
config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
model = model_class(config )
model.to(torch_device )
model.train()
loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
loss.backward()
def test_retain_grad_hidden_states_attentions(self ):
model_class = self.all_model_classes[1]
config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
config.output_hidden_states = True
config.output_attentions = True
model = model_class(config ).to(torch_device )
model.train()
outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
attentions = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=True )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def prepare_img():
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase ):
@cached_property
def model_checkpoints(self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def default_image_processor(self ):
return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def test_inference_no_head(self ):
model = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
inputs_shape = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(inputs_shape , (1, 3, 384, 384) )
with torch.no_grad():
outputs = model(**inputs )
expected_slice_hidden_state = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
expected_slice_hidden_state = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
expected_slice_hidden_state = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
def test_inference_universal_segmentation_head(self ):
model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
inputs_shape = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(inputs_shape , (1, 3, 384, 384) )
with torch.no_grad():
outputs = model(**inputs )
# masks_queries_logits
masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
expected_slice = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
expected_slice = torch.tensor(expected_slice ).to(torch_device )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
expected_slice = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
def test_with_segmentation_maps_and_loss(self ):
model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
image_processor = self.default_image_processor
inputs = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
inputs['''pixel_values'''] = inputs['''pixel_values'''].to(torch_device )
inputs['''mask_labels'''] = [el.to(torch_device ) for el in inputs['''mask_labels''']]
inputs['''class_labels'''] = [el.to(torch_device ) for el in inputs['''class_labels''']]
with torch.no_grad():
outputs = model(**inputs )
self.assertTrue(outputs.loss is not None )
| 33 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height , width , scale_factor=8 ):
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
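# Worked example: this maps a requested image size to the latent grid the UNet runs on, rounding
# up so no requested pixels are lost. With the default scale_factor=8, (512, 512) -> (64, 64) and
# (500, 500) -> (64, 64) as well, since 500 // 64 == 7 with a remainder, which rounds up to 8.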
class KandinskyV22Pipeline(DiffusionPipeline ):
def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
super().__init__()
self.register_modules(
unet=unet , scheduler=scheduler , movq=movq , )
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def prepare_latents(self , shape , dtype , device , generator , latents , scheduler ):
if latents is None:
latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
latents = latents.to(device )
latents = latents * scheduler.init_noise_sigma
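# Scaling by init_noise_sigma puts fresh latents on the scheduler's expected noise scale at the
# first timestep; for DDPM-style schedulers this is 1.0, but Karras-style schedulers use much
# larger initial sigmas, so this line matters when the scheduler is swapped.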
return latents
def enable_sequential_cpu_offload(self , gpu_id=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
device = torch.device(f'''cuda:{gpu_id}''' )
models = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
def enable_model_cpu_offload(self , gpu_id=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
device = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=False )
torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.unet, self.movq]:
_ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device(self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(module , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
device = self._execution_device
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(image_embeds , list ):
image_embeds = torch.cat(image_embeds , dim=0 )
batch_size = image_embeds.shape[0] * num_images_per_prompt
if isinstance(negative_image_embeds , list ):
negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
if do_classifier_free_guidance:
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
self.scheduler.set_timesteps(num_inference_steps , device=device )
timesteps_tensor = self.scheduler.timesteps
num_channels_latents = self.unet.config.in_channels
height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
# create initial latent
latents = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
added_cond_kwargs = {"image_embeds": image_embeds}
noise_pred = self.unet(
sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
if do_classifier_free_guidance:
noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
_, variance_pred_text = variance_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
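# Classifier-free guidance above follows eps = eps_uncond + guidance_scale * (eps_text - eps_uncond);
# the learned variance channels are taken from the conditioned branch only, then re-attached so the
# scheduler can still split prediction and variance downstream.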
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred , t , latents , generator=generator , )[0]
# post-processing
image = self.movq.decode(latents , force_not_quantize=True )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
| 655 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' ,type=str ,default=None ,help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' ,type=int ,default=5 ,help='''The maximum total input sequence length after tokenization.''' ,)
parser.add_argument(
'''--num_beams''' ,type=int ,default=None ,help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) ,)
parser.add_argument(
'''--model_name_or_path''' ,type=str ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=True ,)
parser.add_argument(
'''--config_name''' ,type=str ,default=None ,help='''Pretrained config name or path if not the same as model_name''' ,)
parser.add_argument(
'''--device''' ,type=str ,default='''cpu''' ,help='''Device where the model will be run''' ,)
parser.add_argument('''--output_file_path''' ,type=str ,default=None ,help='''Where to store the final ONNX file.''' )
args = parser.parse_args()
return args
def __snake_case ( _lowercase ,_lowercase="cpu" ):
"""simple docstring"""
UpperCamelCase = model_dict[model_name].from_pretrained(_lowercase ).to(_lowercase )
UpperCamelCase = tokenizer_dict[model_name].from_pretrained(_lowercase )
if model_name in ["facebook/bart-base"]:
UpperCamelCase = 0
UpperCamelCase = None
UpperCamelCase = 0
return huggingface_model, tokenizer
def export_and_validate_model(model ,tokenizer ,onnx_file_path ,num_beams ,max_length ):
model.eval()
ort_sess = None
bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
with torch.no_grad():
ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
inputs = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors='''pt''' ).to(model.device )
summary_ids = model.generate(
inputs['''input_ids'''] ,attention_mask=inputs['''attention_mask'''] ,num_beams=num_beams ,max_length=max_length ,early_stopping=True ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
bart_script_model ,(
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,onnx_file_path ,opset_version=14 ,input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] ,output_names=['''output_ids'''] ,dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} ,example_outputs=summary_ids ,)
logger.info('''Model exported to {}'''.format(onnx_file_path ) )
new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path ) )
ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
ort_out = ort_sess.run(
None ,{
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(num_beams ),
'''max_length''': np.array(max_length ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def main():
args = parse_args()
max_length = 5
num_beams = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
device = torch.device(args.device )
model, tokenizer = load_model_tokenizer(args.model_name_or_path ,device )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(device )
if args.max_length:
max_length = args.max_length
if args.num_beams:
num_beams = args.num_beams
if args.output_file_path:
output_path = args.output_file_path
else:
output_path = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(model ,tokenizer ,output_path ,num_beams ,max_length )
if __name__ == "__main__":
main() | 34 |
import inspect
import unittest
class DependencyTester(unittest.TestCase ):
def test_diffusers_import(self ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def test_backend_registration(self ):
import diffusers
from diffusers.dependency_versions_table import deps
all_classes = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowercase__ = "k-diffusion"
elif backend == "invisible_watermark":
lowercase__ = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
self.parent = parent
self.batch_size = batch_size
self.patch_size = patch_size
self.max_length = max_length
self.num_mel_bins = num_mel_bins
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.frequency_stride = frequency_stride
self.time_stride = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
num_patches = frequency_out_dimension * time_out_dimension
self.seq_length = num_patches + 2
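# Worked example with the defaults above: frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and
# time_out_dimension = (24 - 2) // 2 + 1 = 12, giving num_patches = 96 and seq_length = 98.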
def prepare_config_and_inputs(self ):
input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, input_values, labels
def get_config(self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def create_and_check_model(self , config , input_values , labels ):
model = ASTModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def prepare_config_and_inputs_for_common(self ):
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, labels = config_and_inputs
inputs_dict = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def is_pipeline_test_to_skip(
self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def setUp(self ):
self.model_tester = ASTModelTester(self )
self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
def test_config(self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def test_inputs_embeds(self ):
pass
def test_model_common_attributes(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature(self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''input_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def test_model_from_pretrained(self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ASTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_audio():
filepath = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
audio, sampling_rate = torchaudio.load(filepath )
return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase ):
@cached_property
def default_feature_extractor(self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def test_inference_audio_classification(self ):
feature_extractor = self.default_feature_extractor
model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
audio, sampling_rate = prepare_audio()
audio = audio.squeeze().numpy()
inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 35 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
common: CommonSchedulerState
# setable values
init_noise_sigma: jnp.ndarray
timesteps: jnp.ndarray
num_inference_steps: Optional[int] = None
@classmethod
def create(cls , common: CommonSchedulerState , init_noise_sigma: jnp.ndarray , timesteps: jnp.ndarray ):
return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput ):
state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin , ConfigMixin ):
_compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
dtype: jnp.dtype
@property
def has_state(self ):
return True
@register_to_config
def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[jnp.ndarray] = None , variance_type: str = "fixed_small" , clip_sample: bool = True , prediction_type: str = "epsilon" , dtype: jnp.dtype = jnp.float32 , ):
self.dtype = dtype
def create_state(self , common: Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
if common is None:
common = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
def scale_model_input(
self , state: DDPMSchedulerState , sample: jnp.ndarray , timestep: Optional[int] = None ) -> jnp.ndarray:
return sample
def set_timesteps(
self , state: DDPMSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> DDPMSchedulerState:
step_ratio = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=num_inference_steps , timesteps=timesteps , )
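# Example: with num_train_timesteps=1000 and num_inference_steps=50, step_ratio is 20 and the
# timesteps become [980, 960, ..., 20, 0], descending because sampling runs from high to low noise.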
def _get_variance(self , state: DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
alpha_prod_t = state.common.alphas_cumprod[t]
alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
variance_type = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
variance = jnp.clip(variance , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
elif variance_type == "fixed_large":
variance = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
variance = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
min_log = variance
max_log = state.common.betas[t]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
return variance
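# Note on "learned_range": following Improved DDPM (Nichol &amp; Dhariwal, 2021), the network emits a
# value in [-1, 1] that interpolates between a lower bound (the posterior variance) and an upper
# bound (beta_t); note that as written the interpolation is over the raw values, even though the
# variable names suggest log-space.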
def step(
self , state: DDPMSchedulerState , model_output: jnp.ndarray , timestep: int , sample: jnp.ndarray , key: Optional[jax.random.KeyArray] = None , return_dict: bool = True , ):
t = timestep
if key is None:
key = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
model_output, predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = state.common.alphas_cumprod[t]
alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
"or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
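# The two coefficients below implement the DDPM posterior mean:
# mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#               + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t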
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
split_key = jax.random.split(key , num=1 )
noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
def add_noise(
self , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
return add_noise_common(state.common , original_samples , noise , timesteps )
def get_velocity(
self , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self ):
return self.config.num_train_timesteps
| 655 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_roformer_fast'''] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_roformer'''] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_roformer'''] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_roformer'''] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
model_type: str = field(
default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
data_dir: str = field(
default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
max_seq_length: int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
doc_stride: int = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
max_query_length: int = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
max_answer_length: int = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
overwrite_cache: bool = field(
default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
version_2_with_negative: bool = field(
default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
null_score_diff_threshold: float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
n_best_size: int = field(
default=20 , metadata={'help': 'The total number of n-best predictions to generate.'} )
lang_id: int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum ):
train = 'train'
dev = 'dev'
class SquadDataset(Dataset ):
args: SquadDataTrainingArguments
features: List[SquadFeatures]
mode: Split
is_language_sensitive: bool
def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ):
self.args = args
self.is_language_sensitive = is_language_sensitive
self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
self.mode = mode
# Load data features from cache or dataset file
version_tag = "v2" if args.version_2_with_negative else "v1"
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.old_features = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
self.features = self.old_features["features"]
self.dataset = self.old_features.get("dataset" , None )
self.examples = self.old_features.get("examples" , None )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
" future run" )
else:
if mode == Split.dev:
self.examples = self.processor.get_dev_examples(args.data_dir )
else:
self.examples = self.processor.get_train_examples(args.data_dir )
self.features , self.dataset = squad_convert_examples_to_features(
examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
start = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
return len(self.features )
def __getitem__( self , i ):
# Convert to Tensors and build dataset
feature = self.features[i]
input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
start_positions = torch.tensor(feature.start_position , dtype=torch.long )
end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 655 | 0 |
def is_contains_unique_chars(input_str: str ) -> bool:
bitmap = 0
for ch in input_str:
ch_unicode = ord(ch )
ch_bit_index_on = pow(2 , ch_unicode )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
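# The integer acts as an arbitrarily wide bitset: bit ord(ch) records whether ch was already seen,
# so the check runs in O(n) with no auxiliary set. For example, is_contains_unique_chars("abc")
# returns True while is_contains_unique_chars("aba") returns False.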
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
RESOURCE_FILES_NAMES = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class ErnieMTokenizer(PreTrainedTokenizer ):
model_input_names = ["input_ids"]
vocab_files_names = VOCAB_FILES_NAMES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
resource_files_names = RESOURCE_FILES_NAMES
def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.do_lower_case = do_lower_case
self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(sentencepiece_model_ckpt )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
self.vocab = self.load_vocab(filepath=vocab_file )
else:
self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def get_offset_mapping(self , text ):
if text is None:
return None
split_tokens = self.tokenize(text )
normalized_text, char_mapping = "", []
for i, ch in enumerate(text ):
if ch in self.SP_CHAR_MAPPING:
ch = self.SP_CHAR_MAPPING.get(ch )
else:
ch = unicodedata.normalize("NFKC" , ch )
if self.is_whitespace(ch ):
continue
normalized_text += ch
char_mapping.extend([i] * len(ch ) )
text, token_mapping, offset = normalized_text, [], 0
if self.do_lower_case:
text = text.lower()
for token in split_tokens:
if token[:1] == "▁":
token = token[1:]
start = text[offset:].index(token ) + offset
end = start + len(token )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
offset = end
return token_mapping
@property
def vocab_size(self ):
return len(self.vocab )
def get_vocab(self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__( self , d ):
self.__dict__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def clean_text(self , text ):
return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase ( self :Dict , _lowercase :int , _lowercase :Union[str, Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowercase ) + 1) + [1] * (len(_lowercase ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
def UpperCAmelCase ( self :List[str] , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
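# A minimal runnable sketch of the vocabulary dump above: the sorted() call
# references `kv` although its lambda parameter was renamed away, so it is
# restored here. The helper name and signature are assumptions for illustration.
def _write_vocab_sorted_by_index(vocab, vocab_file):
    index = 0
    with open(vocab_file, "w", encoding="utf-8") as writer:
        # Line number == token id, so iterate tokens sorted by their index.
        for token, token_index in sorted(vocab.items(), key=lambda kv: kv[1]):
            if index != token_index:
                print(f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.")
                index = token_index
            writer.write(token + "\n")
            index += 1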
| 655 | 0 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
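# A de-obfuscated sketch of the paginated fetch above. Parameter names are
# reconstructed from the f-strings (`workflow_run_id`, `token`); the GitHub
# jobs API returns at most 100 items per page, hence the extra-page loop.
# `requests` and `math` are already imported at the top of this sample.
def _get_job_links_sketch(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {job["name"]: job["html_url"] for job in result["jobs"]}
    # The first request returned up to 100 jobs; fetch the remaining pages.
    pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
    for i in range(pages_to_iterate_over):
        result = requests.get(url + f"&page={i + 2}", headers=headers).json()
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
    return job_links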
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
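# A readable sketch of the aggregation above: identical error messages are
# counted with the Counter, each entry keeps its failing tests, and the final
# dict is sorted by count (the lambda parameter is restored to `item`, which
# the body references).
def _reduce_by_error_sketch(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    r = {}
    for error, count in counter.most_common():
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    return dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))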
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 38 |
def _A ( __magic_name__ ):
lowercase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _A ( __magic_name__ = 100 ):
lowercase__ = 1
lowercase__ = 2
for i in range(2 , max_n + 1 ):
lowercase__ = pre_numerator
lowercase__ = 2 * i // 3 if i % 3 == 0 else 1
lowercase__ = cur_numerator
lowercase__ = e_cont * pre_numerator + temp
return sum_digits(__magic_name__ )
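# A cleaned-up, runnable sketch of the two helpers above (Project Euler 65:
# digit sum of the numerator of the 100th convergent of e). Variable names
# are reconstructed from the mangled assignments.
def _sum_digits_sketch(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum

def _solution_sketch(max_n=100):
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]: every third term is 2*i/3.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return _sum_digits_sketch(cur_numerator)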
if __name__ == "__main__":
print(F"""{solution() = }""")
| 655 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
snake_case_ = True if '''large''' in model_name or '''huge''' in model_name else False
snake_case_ = True if '''large''' in model_name or '''huge''' in model_name else False
snake_case_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
snake_case_ = [3, 3, 3, 3]
snake_case_ = [5, 5, 5, 5]
elif "fl4" in model_name:
snake_case_ = [4, 4, 4, 4]
snake_case_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
snake_case_ = [3, 3, 3, 3]
if "lrf" in model_name:
snake_case_ = [3, 3, 3, 3]
else:
snake_case_ = [2, 2, 2, 2]
if "tiny" in model_name:
snake_case_ = 96
elif "small" in model_name:
snake_case_ = 96
elif "base" in model_name:
snake_case_ = 128
elif "large" in model_name:
snake_case_ = 192
elif "xlarge" in model_name:
snake_case_ = 256
elif "huge" in model_name:
snake_case_ = 352
# set label information
snake_case_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
snake_case_ = '''imagenet-22k-id2label.json'''
else:
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = FocalNetConfig(
embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , focal_levels=SCREAMING_SNAKE_CASE__ , focal_windows=SCREAMING_SNAKE_CASE__ , use_conv_embed=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , use_post_layernorm=SCREAMING_SNAKE_CASE__ , use_layerscale=SCREAMING_SNAKE_CASE__ , )
return config
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if "patch_embed.proj" in name:
snake_case_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
snake_case_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
snake_case_ = '''encoder.''' + name
if "encoder.layers" in name:
snake_case_ = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
snake_case_ = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
snake_case_ = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
snake_case_ = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
snake_case_ = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
snake_case_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
snake_case_ = '''layernorm.weight'''
if name == "norm.bias":
snake_case_ = '''layernorm.bias'''
if "head" in name:
snake_case_ = name.replace('''head''' , '''classifier''' )
else:
snake_case_ = '''focalnet.''' + name
return name
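# A sketch of how the renaming above is applied when converting a checkpoint.
# `rename_key` stands in for the mangled function defined above; that name is
# an assumption for illustration.
def _convert_state_dict_sketch(state_dict):
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    return state_dict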
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
# fmt: off
snake_case_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
snake_case_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ = val
snake_case_ = get_focalnet_config(SCREAMING_SNAKE_CASE__ )
snake_case_ = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
# load state dict
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify conversion
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE__ , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE__ , image_mean=SCREAMING_SNAKE_CASE__ , image_std=SCREAMING_SNAKE_CASE__ , )
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
snake_case_ = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
snake_case_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
snake_case_ = image_transforms(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
snake_case_ = model(**SCREAMING_SNAKE_CASE__ )
snake_case_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
snake_case_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
snake_case_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
snake_case_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
snake_case_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
snake_case_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
snake_case_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
 | 39 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'AutoTokenizer'
__lowerCamelCase = ['tokenizer']
__lowerCamelCase = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self :Dict , _lowercase :List[str] , _lowercase :List[Any]=None ):
'''simple docstring'''
super().__init__(_lowercase )
lowercase__ = speaker_embeddings
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowercase__ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(_lowercase , _lowercase )}` does not exist;
no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = self.speaker_embeddings[voice_preset]
lowercase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowercase__ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist;
no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
lowercase__ = np.load(_lowercase )
return voice_preset_dict
def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
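# A minimal usage sketch of this processor. The checkpoint name
# "suno/bark-small" and the preset "v2/en_speaker_6" are examples, not taken
# from this file.
def _example_usage():
    from transformers import BarkProcessor
    processor = BarkProcessor.from_pretrained("suno/bark-small")
    return processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")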
| 655 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase ( snake_case__ : Tuple ) -> str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> Any:
from transformers.testing_utils import pytest_terminal_summary_main
UpperCamelCase : Optional[int] = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
| 40 |
import math
import random
def _A ( __magic_name__ , __magic_name__ = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
_snake_case = 0.02
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__magic_name__ ):
# Forward propagation
lowercase__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowercase__ = (expected / 100) - layer_a
# Error delta
lowercase__ = layer_1_error * sigmoid_function(__magic_name__ , __magic_name__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
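# A de-obfuscated sketch of the script above: a single weight is nudged so
# that sigmoid(0.02 * weight) approaches expected / 100. Names are
# reconstructed from the mangled originals; `math` and `random` are already
# imported at the top of this sample.
_INITIAL_VALUE_SKETCH = 0.02

def _sigmoid(value, deriv=False):
    if deriv:
        # `value` is assumed to already be a sigmoid output here.
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))

def _forward_propagation_sketch(expected, number_propagations):
    weight = float(2 * random.randint(1, 100) - 1)
    layer_1 = 0.0
    for _ in range(number_propagations):
        layer_1 = _sigmoid(_INITIAL_VALUE_SKETCH * weight)
        layer_1_error = (expected / 100) - layer_1
        layer_1_delta = layer_1_error * _sigmoid(layer_1, deriv=True)
        weight += _INITIAL_VALUE_SKETCH * layer_1_delta
    return layer_1 * 100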
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("""Expected value: """))
_snake_case = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 655 | 0 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
lowerCAmelCase__ = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = CamembertTokenizer
SCREAMING_SNAKE_CASE : Optional[int] = CamembertTokenizerFast
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
def SCREAMING_SNAKE_CASE ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = CamembertTokenizer(lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = '''<pad>'''
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) ,lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] ,'''<pad>''' )
self.assertEqual(vocab_keys[-1] ,'''<mask>''' )
self.assertEqual(len(lowercase__ ) ,1_0_0_4 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_5 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = CamembertTokenizer(lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
__lowercase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowercase = '''I was born in 92000, and this is falsé.'''
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowercase = tokenizer.convert_ids_to_tokens(lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = '''I was born in 92000, and this is falsé.'''
__lowercase = tokenizer.tokenize(lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# fmt: off
__lowercase = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__lowercase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ ,model_name='''camembert-base''' ,revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' ,sequences=lowercase__ ,)
| 41 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'van'
def __init__( self :Optional[Any] , _lowercase :Dict=2_24 , _lowercase :Union[str, Any]=3 , _lowercase :List[Any]=[7, 3, 3, 3] , _lowercase :Any=[4, 2, 2, 2] , _lowercase :Union[str, Any]=[64, 1_28, 3_20, 5_12] , _lowercase :List[Any]=[3, 3, 12, 3] , _lowercase :Dict=[8, 8, 4, 4] , _lowercase :int="gelu" , _lowercase :List[Any]=0.02 , _lowercase :List[Any]=1e-6 , _lowercase :Any=1e-2 , _lowercase :int=0.0 , _lowercase :int=0.0 , **_lowercase :Dict , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = mlp_ratios
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = dropout_rate
| 655 | 0 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ) -> list:
if len(__UpperCamelCase ) <= 1:
return [tuple(__UpperCamelCase )]
lowerCamelCase_ = []
def generate(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = [0] * n
res.append(tuple(__UpperCamelCase ) )
lowerCamelCase_ = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
lowerCamelCase_ ,lowerCamelCase_ = arr[i], arr[0]
else:
lowerCamelCase_ ,lowerCamelCase_ = arr[i], arr[c[i]]
res.append(tuple(__UpperCamelCase ) )
c[i] += 1
lowerCamelCase_ = 0
else:
lowerCamelCase_ = 0
i += 1
generate(len(__UpperCamelCase ) ,__UpperCamelCase )
return res
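# A readable reconstruction of the iterative Heap's algorithm above: it
# generates every permutation of `arr` by swapping one pair at a time. With
# this definition the __main__ block below actually resolves `heaps`.
def heaps(arr):
    if len(arr) <= 1:
        return [tuple(arr)]
    res = [tuple(arr)]
    n = len(arr)
    c = [0] * n  # per-position counters that emulate the recursive version
    i = 0
    while i < n:
        if c[i] < i:
            if i % 2 == 0:
                arr[i], arr[0] = arr[0], arr[i]
            else:
                arr[i], arr[c[i]] = arr[c[i]], arr[i]
            res.append(tuple(arr))
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
    return res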
if __name__ == "__main__":
A_ = input("Enter numbers separated by a comma:\n").strip()
A_ = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 42 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( enum.Enum ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowercase__ = None
if self.model.config.prefix is not None:
lowercase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowercase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
lowercase__ = {**self._preprocess_params, **preprocess_params}
lowercase__ = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
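# A minimal usage sketch of this pipeline via the public API (the checkpoint
# name "gpt2" is an example):
def _example_generation():
    from transformers import pipeline
    generator = pipeline("text-generation", model="gpt2")
    return generator("Hello, I'm a language model,", max_new_tokens=20)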
| 655 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for param in module.parameters():
lowercase__ = False
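# The loop above drops its assignment target; the intended effect is to
# freeze the module so its weights receive no gradients (a sketch):
def _freeze_params_sketch(module):
    for param in module.parameters():
        param.requires_grad = False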
def _a ( ):
"""simple docstring"""
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def _a ( ):
"""simple docstring"""
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 43 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _A ( __magic_name__ ):
lowercase__ = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__magic_name__ )[0]
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(rows * cols * num_images )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
lowercase__ = data.reshape(__magic_name__ , __magic_name__ , __magic_name__ , 1 )
return data
@deprecated(__magic_name__ , "Please use tf.one_hot on tensors." )
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = labels_dense.shape[0]
lowercase__ = numpy.arange(__magic_name__ ) * num_classes
lowercase__ = numpy.zeros((num_labels, num_classes) )
lowercase__ = 1
return labels_one_hot
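# A runnable sketch of the one-hot conversion above: the flat-index
# assignment lost its left-hand side in the mangling and is restored here
# (`numpy` is already imported at the top of this sample).
def _dense_to_one_hot_sketch(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Set one position per row: row i gets a 1 at column labels_dense[i].
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot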
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(__magic_name__ )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__magic_name__ , __magic_name__ )
return labels
class lowerCAmelCase :
@deprecated(
_lowercase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self :List[str] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple=False , _lowercase :str=False , _lowercase :Dict=dtypes.floataa , _lowercase :Optional[Any]=True , _lowercase :Any=None , ):
'''simple docstring'''
lowercase__ , lowercase__ = random_seed.get_seed(_lowercase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowercase__ = dtypes.as_dtype(_lowercase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
lowercase__ = 1_00_00
lowercase__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
lowercase__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase__ = images.astype(numpy.floataa )
lowercase__ = numpy.multiply(_lowercase , 1.0 / 255.0 )
lowercase__ = images
lowercase__ = labels
lowercase__ = 0
lowercase__ = 0
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._images
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return self._labels
@property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return self._num_examples
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._epochs_completed
def UpperCAmelCase ( self :str , _lowercase :Union[str, Any] , _lowercase :Any=False , _lowercase :Union[str, Any]=True ):
'''simple docstring'''
if fake_data:
lowercase__ = [1] * 7_84
lowercase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_lowercase )],
[fake_label for _ in range(_lowercase )],
)
lowercase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perma]
lowercase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ = self._num_examples - start
lowercase__ = self._images[start : self._num_examples]
lowercase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perm]
lowercase__ = self.labels[perm]
# Start next epoch
lowercase__ = 0
lowercase__ = batch_size - rest_num_examples
lowercase__ = self._index_in_epoch
lowercase__ = self._images[start:end]
lowercase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
lowercase__ = fake()
lowercase__ = fake()
lowercase__ = fake()
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
if not source_url: # empty string check
lowercase__ = DEFAULT_SOURCE_URL
lowercase__ = "train-images-idx3-ubyte.gz"
lowercase__ = "train-labels-idx1-ubyte.gz"
lowercase__ = "t10k-images-idx3-ubyte.gz"
lowercase__ = "t10k-labels-idx1-ubyte.gz"
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
if not 0 <= validation_size <= len(__magic_name__ ):
lowercase__ = (
"Validation size should be between 0 and "
f'''{len(__magic_name__ )}. Received: {validation_size}.'''
)
raise ValueError(__magic_name__ )
lowercase__ = train_images[:validation_size]
lowercase__ = train_labels[:validation_size]
lowercase__ = train_images[validation_size:]
lowercase__ = train_labels[validation_size:]
lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
| 655 | 0 |
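The subtle part of next_batch above is the epoch-crossing branch: when a batch straddles the epoch boundary, the remainder of the current epoch is concatenated with the head of the next, reshuffled one. A minimal self-contained sketch of that logic, assuming batch_size <= num_examples and using a hypothetical BatchCursor class in place of the dataset object (the first-epoch shuffle is omitted for brevity):

import numpy as np

class BatchCursor:
    def __init__(self, images, labels):
        self.images, self.labels = np.asarray(images), np.asarray(labels)
        self.index = 0
        self.epochs_completed = 0

    def next_batch(self, batch_size, shuffle=True):
        n, start = len(self.images), self.index
        if start + batch_size > n:  # the batch straddles an epoch boundary
            self.epochs_completed += 1
            rest_x, rest_y = self.images[start:], self.labels[start:]
            if shuffle:  # reshuffle before starting the next epoch
                perm = np.random.permutation(n)
                self.images, self.labels = self.images[perm], self.labels[perm]
            self.index = batch_size - len(rest_x)  # spill over into the new epoch
            return (np.concatenate([rest_x, self.images[:self.index]]),
                    np.concatenate([rest_y, self.labels[:self.index]]))
        self.index = start + batch_size
        return self.images[start:self.index], self.labels[start:self.index]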
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(_lowerCAmelCase , x % y )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return (x * y) // greatest_common_divisor(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 20 ):
"""simple docstring"""
_lowerCamelCase : Tuple = 1
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = lcm(_lowerCAmelCase , _lowerCAmelCase )
return g
if __name__ == "__main__":
print(f'''{solution() = }''') | 44 |
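The snippet reduces Project Euler's "smallest number evenly divisible by every integer from 1 to n" to a running LCM, via lcm(x, y) = x * y / gcd(x, y). A quick cross-check against the standard library:

from functools import reduce
from math import gcd

def lcm(x, y):
    return x * y // gcd(x, y)

# Folding lcm over 1..20 reproduces the snippet's result for the default n = 20.
print(reduce(lcm, range(1, 21)))  # 232792560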
from __future__ import annotations
class lowerCAmelCase :
def __init__( self :Union[str, Any] , _lowercase :List[Any]=None ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
def __repr__( self :Dict ):
'''simple docstring'''
lowercase__ = []
lowercase__ = self
while temp:
string_rep.append(f'''{temp.data}''' )
lowercase__ = temp.next
return "->".join(_lowercase )
def _A ( __magic_name__ ):
if not elements_list:
raise Exception("The Elements List is empty" )
lowercase__ = lowercase__ = Node(elements_list[0] )
for i in range(1 , len(__magic_name__ ) ):
lowercase__ = Node(elements_list[i] )
lowercase__ = current.next
return head
def _A ( __magic_name__ ):
if head_node is not None and isinstance(__magic_name__ , __magic_name__ ):
print_reverse(head_node.next )
print(head_node.data )
def _A ( ):
from doctest import testmod
testmod()
lowercase__ = make_linked_list([14, 52, 14, 12, 43] )
print("Linked List:" )
print(__magic_name__ )
print("Elements in Reverse:" )
print_reverse(__magic_name__ )
if __name__ == "__main__":
main()
| 655 | 0 |
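print_reverse in the linked-list snippet above leans on the call stack: recurse to the tail first, then print on the way back up, so no explicit reversal is needed. The same idea as a minimal de-obfuscated sketch:

class Node:
    def __init__(self, data, next_node=None):
        self.data, self.next = data, next_node

def print_reverse(node):
    # Recurse to the tail first, then print while the stack unwinds.
    if node is not None:
        print_reverse(node.next)
        print(node.data)

head = Node(14, Node(52, Node(14, Node(12, Node(43)))))
print_reverse(head)  # prints 43, 12, 14, 52, 14, one value per line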
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowercase )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_snake_case : ClassVar[Features] = Features({"""image""": Image()} )
_snake_case : ClassVar[Features] = Features({"""labels""": ClassLabel} )
_snake_case : str = "image"
_snake_case : str = "labels"
def __a ( self :Any , lowerCamelCase__ :Dict ):
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , lowerCamelCase__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
UpperCamelCase__ :Union[str, Any] = copy.deepcopy(self )
UpperCamelCase__ :Any = self.label_schema.copy()
UpperCamelCase__ :int = features[self.label_column]
UpperCamelCase__ :Optional[Any] = label_schema
return task_template
@property
def __a ( self :List[Any] ):
return {
self.image_column: "image",
self.label_column: "labels",
} | 45 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __magic_name__ , __magic_name__=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
d //= 2
exp += 1
# n - 1 = d * (2**exp), with d odd
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 , n - 1 )
lowercase__ = bin_exp_mod(__magic_name__ , __magic_name__ , __magic_name__ )
if b != 1:
lowercase__ = True
for _ in range(__magic_name__ ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
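The primality snippet above is the Miller-Rabin test: write n - 1 = d * 2**exp with d odd, then for random bases a check that a**d mod n is 1 or that some repeated squaring reaches n - 1. A clean sketch using the built-in three-argument pow for modular exponentiation (the round count is an assumed default, not taken from the snippet):

import random

def is_probable_prime(n, rounds=40):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:  # factor n - 1 as d * 2**exp with d odd
        d //= 2
        exp += 1
    for _ in range(rounds):
        a = random.randint(2, n - 1)
        b = pow(a, d, n)  # built-in modular exponentiation
        if b in (1, n - 1):
            continue
        for _ in range(exp - 1):
            b = b * b % n
            if b == n - 1:
                break
        else:  # no square reached n - 1, so a witnesses that n is composite
            return False
    return True

print([i for i in range(30) if is_probable_prime(i)])  # 2, 3, 5, 7, 11, ...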
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert) | 46 |
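The converter above is a simple round-trip: load a flat state dict on CPU, cast every tensor to float16, and save it back, overwriting the source unless a destination is given. A minimal sketch of the same steps; the file names in the usage line are hypothetical:

import torch

def convert_to_fp16(src_path, save_path=None):
    # Load the checkpoint on CPU, cast every tensor to float16, save it back.
    state_dict = torch.load(src_path, map_location="cpu")
    for key, value in state_dict.items():
        if not isinstance(value, torch.Tensor):
            raise TypeError("expected a flat state dict of tensors")
        state_dict[key] = value.half()
    torch.save(state_dict, save_path or src_path)  # default: overwrite in place

# convert_to_fp16("pytorch_model.bin", "pytorch_model.fp16.bin")  # hypothetical paths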
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
| 655 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
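Both lazy-module snippets in this section follow one pattern: declare an import structure, expose the real names only under TYPE_CHECKING, and hand actual imports to a module proxy (transformers' _LazyModule). Outside that helper, plain PEP 562 achieves the same deferral; a minimal sketch for a hypothetical package:

# mypkg/__init__.py -- hypothetical package showing the same deferral with PEP 562
import importlib

_import_structure = {"tokenization_bartpho": ["BartphoTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # called only when a normal attribute lookup fails
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)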
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowercase__ = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !"
lowercase__ = model(_lowercase )["last_hidden_state"]
lowercase__ = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
lowercase__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 655 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = ['image_processor', 'tokenizer']
snake_case__ :List[Any] = 'ChineseCLIPImageProcessor'
snake_case__ :Optional[int] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Dict , __magic_name__ : List[str]=None , __magic_name__ : List[Any]=None , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = self.image_processor
def __call__( self : List[Any] , __magic_name__ : Tuple=None , __magic_name__ : Any=None , __magic_name__ : str=None , **__magic_name__ : List[str] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowerCAmelCase__ = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
lowerCAmelCase__ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
lowerCAmelCase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , *__magic_name__ : Union[str, Any] , **__magic_name__ : List[str] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any , *__magic_name__ : str , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.model_input_names
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
return self.image_processor_class
| 48 |
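The processor's __call__ above dispatches on which inputs are present: tokenize the text, run the image processor, and merge pixel_values into the text encoding when both exist. The dispatch logic in isolation, as a hedged sketch with a hypothetical helper (tokenizer and image_processor stand for any callables returning dict-like encodings):

def combined_call(tokenizer, image_processor, text=None, images=None, **kwargs):
    # Hypothetical helper mirroring the __call__ dispatch above.
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = tokenizer(text, **kwargs) if text is not None else None
    image_features = image_processor(images, **kwargs) if images is not None else None
    if encoding is not None and image_features is not None:
        encoding["pixel_values"] = image_features["pixel_values"]  # merge image tensors
    return encoding if encoding is not None else image_features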
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Pad binary_stream with zero bits ("0" characters) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
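A worked instance of the 6-bit regrouping the encoder above performs: b"Hi" is 16 bits, which pads with two zero bits to three sextets (values 18, 6, 36, i.e. "SGk") plus one "=" for the missing input byte. Cross-checking the hand computation against the standard library:

import base64

data = b"Hi"  # 0x48 0x69 -> bits 01001000 01101001
bits = "".join(bin(byte)[2:].zfill(8) for byte in data)
bits += "0" * (-len(bits) % 6)  # 16 bits -> 18 bits (three sextets)
charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
encoded = "".join(charset[int(bits[i:i + 6], 2)] for i in range(0, len(bits), 6))
encoded += "=" * ((3 - len(data) % 3) % 3)  # one '=' per missing input byte
assert encoded == base64.b64encode(data).decode()  # both give "SGk="
print(encoded)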
"""simple docstring"""
def lowercase__ ( snake_case_ :int ):
__UpperCAmelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__ ( snake_case_ :int = 5_000 ):
__UpperCAmelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case_ )]
for i, pentagonal_i in enumerate(snake_case_ ):
for j in range(snake_case_ , len(snake_case_ ) ):
__UpperCAmelCase = pentagonal_nums[j]
__UpperCAmelCase = pentagonal_i + pentagonal_j
__UpperCAmelCase = pentagonal_j - pentagonal_i
if is_pentagonal(snake_case_ ) and is_pentagonal(snake_case_ ):
return b
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 49 |
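is_pentagonal above inverts P(k) = k(3k - 1)/2: solving 3k^2 - k - 2n = 0 gives k = (1 + sqrt(1 + 24n)) / 6, so n is pentagonal exactly when that root is a positive integer. The same check in exact integer arithmetic, using math.isqrt instead of the float square root the snippet relies on:

from math import isqrt

def is_pentagonal(n):
    # n = k(3k - 1)/2 for a positive integer k  <=>  1 + 24n is a perfect
    # square whose root r satisfies (1 + r) % 6 == 0.
    root = isqrt(1 + 24 * n)
    return root * root == 1 + 24 * n and (1 + root) % 6 == 0

pentagonals = [k * (3 * k - 1) // 2 for k in range(1, 10)]  # 1, 5, 12, 22, ...
assert all(is_pentagonal(p) for p in pentagonals)
assert not any(is_pentagonal(p + 1) for p in pentagonals)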
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Dict , _lowercase :TransformeraDModel , _lowercase :AutoencoderKL , _lowercase :KarrasDiffusionSchedulers , _lowercase :Optional[Dict[int, str]] = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
# create an imagenet -> id dictionary for easier use
lowercase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
lowercase__ = int(_lowercase )
lowercase__ = dict(sorted(self.labels.items() ) )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowercase__ = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
lowercase__ = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == "mps"
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.floataa if is_mps else torch.floataa
else:
lowercase__ = torch.intaa if is_mps else torch.intaa
lowercase__ = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 0 |
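The guidance block in the pipeline above is classifier-free guidance: the batch stacks conditional and unconditional halves, and the model's noise predictions are extrapolated as eps = eps_uncond + s * (eps_cond - eps_uncond). The tensor arithmetic in isolation, as a minimal sketch:

import torch

def classifier_free_guidance(noise_pred, guidance_scale):
    # noise_pred stacks [conditional, unconditional] halves along the batch axis,
    # matching the torch.cat([class_labels, class_null]) ordering above.
    cond_eps, uncond_eps = noise_pred.chunk(2, dim=0)
    guided = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    # The pipeline then feeds the same guided eps back for both batch halves.
    return torch.cat([guided, guided], dim=0)

eps = torch.randn(4, 3, 32, 32)  # two images, duplicated for CFG
assert classifier_free_guidance(eps, 4.0).shape == eps.shape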
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'speech_to_text'
_UpperCamelCase = ['past_key_values']
_UpperCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self ,_lowerCAmelCase=1_00_00 ,_lowerCAmelCase=12 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=4 ,_lowerCAmelCase=6 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=4 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="relu" ,_lowerCAmelCase=2_56 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=2 ,_lowerCAmelCase=True ,_lowerCAmelCase=1 ,_lowerCAmelCase=0 ,_lowerCAmelCase=2 ,_lowerCAmelCase=60_00 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=2 ,_lowerCAmelCase=(5, 5) ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=80 ,_lowerCAmelCase=1 ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = d_model
lowerCamelCase__ = encoder_ffn_dim
lowerCamelCase__ = encoder_layers
lowerCamelCase__ = encoder_attention_heads
lowerCamelCase__ = decoder_ffn_dim
lowerCamelCase__ = decoder_layers
lowerCamelCase__ = decoder_attention_heads
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = activation_function
lowerCamelCase__ = init_std
lowerCamelCase__ = encoder_layerdrop
lowerCamelCase__ = decoder_layerdrop
lowerCamelCase__ = use_cache
lowerCamelCase__ = encoder_layers
lowerCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ = max_source_positions
lowerCamelCase__ = max_target_positions
lowerCamelCase__ = num_conv_layers
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = conv_channels
lowerCamelCase__ = input_feat_per_channel
lowerCamelCase__ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,is_encoder_decoder=_lowerCAmelCase ,decoder_start_token_id=_lowerCAmelCase ,**_lowerCAmelCase ,)
| 50 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( lowercase_ ):
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = SMALL_MODEL_IDENTIFIER
lowercase__ = "pt"
lowercase__ = "tf"
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowercase )
def UpperCAmelCase ( self :Tuple , _lowercase :int ):
'''simple docstring'''
lowercase__ = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowercase )
model_tf.save_pretrained(_lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
| 655 | 0 |
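The tests above pin down determine_framework's priority order: an explicit user choice wins, then weight files found in a local checkpoint, then whichever framework is importable, preferring PyTorch. A hedged sketch of that chain; the weight-file names are the conventional transformers ones, and the real implementation also handles hub repos and other weight formats:

import importlib.util
import os

def determine_framework(model_path, framework=None):
    if framework is not None:  # 1. explicit choice wins
        return framework
    if os.path.isdir(model_path):  # 2. local checkpoint: inspect the weight files
        files = set(os.listdir(model_path))
        if "pytorch_model.bin" in files:
            return "pt"
        if "tf_model.h5" in files:
            return "tf"
        raise FileNotFoundError(f"no weights found in {model_path}")
    # 3. fall back to whichever framework is installed, preferring PyTorch
    if importlib.util.find_spec("torch") is not None:
        return "pt"
    if importlib.util.find_spec("tensorflow") is not None:
        return "tf"
    raise EnvironmentError("neither PyTorch nor TensorFlow is available")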
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'git_vision_model'
def __init__( self :Dict , _lowercase :Dict=7_68 , _lowercase :Dict=30_72 , _lowercase :Tuple=12 , _lowercase :List[str]=12 , _lowercase :Tuple=3 , _lowercase :Dict=2_24 , _lowercase :Tuple=16 , _lowercase :Optional[int]="quick_gelu" , _lowercase :Union[str, Any]=1e-5 , _lowercase :Tuple=0.0 , _lowercase :Tuple=0.02 , **_lowercase :Optional[Any] , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
def UpperCAmelCase ( cls :List[str] , _lowercase :Union[str, os.PathLike] , **_lowercase :Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
lowercase__ , lowercase__ = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
lowercase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'git'
def __init__( self :Union[str, Any] , _lowercase :Dict=None , _lowercase :List[str]=3_05_22 , _lowercase :Tuple=7_68 , _lowercase :Any=6 , _lowercase :Dict=12 , _lowercase :Any=30_72 , _lowercase :List[Any]="gelu" , _lowercase :Tuple=0.1 , _lowercase :Optional[int]=0.1 , _lowercase :Optional[Any]=10_24 , _lowercase :Any=0.02 , _lowercase :int=1e-12 , _lowercase :List[Any]=0 , _lowercase :int="absolute" , _lowercase :List[str]=True , _lowercase :Any=False , _lowercase :int=1_01 , _lowercase :str=1_02 , _lowercase :Dict=None , **_lowercase :List[str] , ):
'''simple docstring'''
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , pad_token_id=_lowercase , **_lowercase )
if vision_config is None:
lowercase__ = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
lowercase__ = GitVisionConfig(**_lowercase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = tie_word_embeddings
lowercase__ = num_image_with_embedding
lowercase__ = bos_token_id
lowercase__ = eos_token_id
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.__class__.model_type
return output
| 655 | 0 |
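The GIT config above nests a vision config inside the text config, so its to_dict deep-copies __dict__ and re-serializes the nested object into plain JSON-able data. The pattern in miniature, with hypothetical classes:

import copy
import json

class VisionConfig:
    def __init__(self, hidden_size=768, image_size=224):
        self.hidden_size, self.image_size = hidden_size, image_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfig:
    model_type = "composite"

    def __init__(self, vision_config=None, vocab_size=30522):
        self.vision_config = vision_config or VisionConfig()
        self.vocab_size = vocab_size

    def to_dict(self):
        # Deep-copy the attributes, then replace the nested config object
        # with its own dict form and record the model type.
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

print(json.dumps(CompositeConfig().to_dict(), indent=2))  # plain JSON-able dict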
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
A = random.Random()
def __A ( a_ :List[str] , a_ :int=1.0 , a_ :Optional[Any]=None , a_ :int=None) -> List[Any]:
if rng is None:
__a : int = global_rng
__a : Optional[Any] = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=16000 , _UpperCAmelCase=True , _UpperCAmelCase=80 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase="hann_window" , _UpperCAmelCase=80 , _UpperCAmelCase=7600 , _UpperCAmelCase=1e-1_0 , _UpperCAmelCase=True , ):
__a : Optional[Any] = parent
__a : int = batch_size
__a : Optional[int] = min_seq_length
__a : Any = max_seq_length
__a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Union[str, Any] = feature_size
__a : Optional[int] = padding_value
__a : int = sampling_rate
__a : str = do_normalize
__a : int = num_mel_bins
__a : Dict = hop_length
__a : Dict = win_length
__a : Dict = win_function
__a : Optional[Any] = fmin
__a : Union[str, Any] = fmax
__a : Tuple = mel_floor
__a : Optional[Any] = return_attention_mask
def _lowerCamelCase ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Optional[int] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Any = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
if equal_length:
__a : int = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Optional[int] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : List[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = SpeechTaFeatureExtractor
def _lowerCamelCase ( self ):
__a : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def _lowerCamelCase ( self , _UpperCAmelCase ):
self.assertTrue(np.all(np.mean(_UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCamelCase ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
__a : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
__a : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test batched
__a : Any = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
__a : List[Any] = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
def _lowerCamelCase ( self ):
__a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
__a : Tuple = [None, 1600, None]
for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='''np''' )
__a : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Any = range(800 , 1400 , 200 )
__a : Dict = [floats_list((1, x) )[0] for x in lengths]
__a : int = ['''longest''', '''max_length''', '''do_not_pad''']
__a : Any = [None, 1600, None]
for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
__a : int = feat_extract(_UpperCAmelCase , max_length=_UpperCAmelCase , padding=_UpperCAmelCase )
__a : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : int = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
__a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : List[str] = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
__a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
__a : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Optional[int] = feat_extract(
_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
__a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Optional[int] = np.random.rand(100 ).astype(np.floataa )
__a : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__a : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowerCamelCase ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Tuple = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
__a : Union[str, Any] = feature_extractor(audio_target=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__a : Tuple = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
__a : int = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test batched
__a : Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
__a : Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__a : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : str = np.asarray(_UpperCAmelCase )
__a : List[str] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
__a : str = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
__a : Tuple = feat_extract.model_input_names[0]
__a : int = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) )
__a : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase )
__a : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
__a : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self ):
__a : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase )
__a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
__a : List[str] = feat_extract.model_input_names[0]
__a : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
__a : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self ):
__a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__a : str = self.feat_extract_tester.prepare_inputs_for_target()
__a : Tuple = feat_extract.model_input_names[0]
__a : Optional[int] = BatchFeature({input_name: speech_inputs} )
__a : int = feat_extract.num_mel_bins # hack!
__a : Any = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
__a : Optional[int] = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feat_extract_dict
__a : str = True
__a : Dict = self.feature_extraction_class(**_UpperCAmelCase )
__a : int = self.feat_extract_tester.prepare_inputs_for_target()
__a : Optional[Any] = [len(_UpperCAmelCase ) for x in speech_inputs]
__a : Any = feat_extract.model_input_names[0]
__a : Dict = BatchFeature({input_name: speech_inputs} )
__a : Any = feat_extract.num_mel_bins # hack!
__a : Tuple = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Any = self.feat_extract_dict
__a : Dict = True
__a : List[str] = self.feature_extraction_class(**_UpperCAmelCase )
__a : str = self.feat_extract_tester.prepare_inputs_for_target()
__a : Optional[int] = [len(_UpperCAmelCase ) for x in speech_inputs]
__a : Tuple = feat_extract.model_input_names[0]
__a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
__a : Tuple = min(_UpperCAmelCase )
__a : str = feat_extract.num_mel_bins # hack!
__a : Dict = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowerCamelCase ( self , _UpperCAmelCase ):
from datasets import load_dataset
__a : Dict = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : Tuple = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
# fmt: off
__a : List[Any] = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
__a : Union[str, Any] = self._load_datasamples(1 )
__a : str = SpeechTaFeatureExtractor()
__a : List[str] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).input_values
self.assertEqual(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _UpperCAmelCase , atol=1e-6 ) )
def _lowerCamelCase ( self ):
# fmt: off
__a : Tuple = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
__a : Dict = self._load_datasamples(1 )
__a : Any = SpeechTaFeatureExtractor()
__a : List[Any] = feature_extractor(audio_target=_UpperCAmelCase , return_tensors='''pt''' ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCAmelCase , atol=1e-4 ) ) | 52 |
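_check_zero_mean_unit_variance above asserts that each feature dimension of the padded output has mean ~0 and variance ~1; the normalization being verified reduces to ordinary standardization along the time axis. A minimal NumPy sketch of that check (the extractor's attention-mask-aware variant differs in how padded frames are excluded):

import numpy as np

def zero_mean_unit_var(x, eps=1e-7):
    # Standardize each feature dimension over the time axis (axis 0).
    return (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + eps)

x = np.random.rand(1000, 80).astype(np.float32) * 5 + 3
y = zero_mean_unit_var(x)
assert np.all(np.abs(y.mean(axis=0)) < 1e-3)     # mean ~ 0
assert np.all(np.abs(y.var(axis=0) - 1) < 1e-3)  # variance ~ 1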
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_pt=_lowercase )
lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_tf=_lowercase )
lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
_lowercase , output_loading_info=_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ = AutoConfig.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsNotNone(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
| 655 | 0 |
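The cross-framework tests above exercise the `from_pt`/`from_tf` conversion paths of `from_pretrained`. A minimal sketch of the same round trip, assuming `transformers` with both PyTorch and TensorFlow installed and Hub access for "bert-base-uncased":

import tempfile

from transformers import AutoModel, TFAutoModel

with tempfile.TemporaryDirectory() as tmp:
    # save PyTorch weights, then load them into the TF architecture on the fly
    AutoModel.from_pretrained("bert-base-uncased").save_pretrained(tmp)
    tf_model = TFAutoModel.from_pretrained(tmp, from_pt=True)  # converts pt -> tf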
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = ["""image_processor""", """tokenizer"""]
a_ = """BlipImageProcessor"""
a_ = """AutoTokenizer"""
def __init__( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str ) -> Optional[Any]:
__lowerCAmelCase = False
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = self.image_processor
def __call__( self : List[str] , lowerCAmelCase_ : ImageInput = None , lowerCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : Optional[Any] , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__lowerCAmelCase = self.tokenizer
__lowerCAmelCase = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
return text_encoding
# add pixel_values
__lowerCAmelCase = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
if text is not None:
__lowerCAmelCase = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
else:
__lowerCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase_ )
return encoding_image_processor
def lowercase ( self : Dict , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ) -> int:
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowercase ( self : int ) -> Optional[int]:
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 53 |
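A hedged usage sketch for a composite processor like the one above; the `BlipProcessor` class and the "Salesforce/blip-image-captioning-base" checkpoint are assumptions standing in for the concrete subclass. The image processor contributes `pixel_values`, the tokenizer contributes `input_ids`, and the two are merged into a single `BatchEncoding`:

from PIL import Image

from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (224, 224))  # stand-in image
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values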
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
def _A ( __magic_name__ ):
lowercase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase__ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
lowercase__ = value
else:
lowercase__ = value
return new_state_dict
def _A ( __magic_name__ ):
lowercase__ = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:256, :]
lowercase__ = in_proj_bias[:256]
lowercase__ = in_proj_weight[256:512, :]
lowercase__ = in_proj_bias[256:512]
lowercase__ = in_proj_weight[-256:, :]
lowercase__ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowercase__ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowercase__ = in_proj_weight_cross_attn[:256, :]
lowercase__ = in_proj_bias_cross_attn[:256]
lowercase__ = in_proj_weight_cross_attn[256:512, :]
lowercase__ = in_proj_bias_cross_attn[256:512]
lowercase__ = in_proj_weight_cross_attn[-256:, :]
lowercase__ = in_proj_bias_cross_attn[-256:]
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ , lowercase__ = image.size
lowercase__ = max(__magic_name__ , __magic_name__ )
lowercase__ = 800 if "detection" in checkpoint_url else 1000
lowercase__ = target_max_size / current_max_size
lowercase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _A ( __magic_name__ ):
lowercase__ = F.to_tensor(__magic_name__ )
lowercase__ = F.normalize(__magic_name__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
logger.info("Converting model..." )
# load original state dict
lowercase__ = torch.hub.load_state_dict_from_url(__magic_name__ , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__ = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowercase__ = state_dict.pop(__magic_name__ )
lowercase__ = val
# create HuggingFace model and load state dict
lowercase__ = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowercase__ = 15
lowercase__ = 2
lowercase__ = {0: "table", 1: "table rotated"}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
else:
lowercase__ = 125
lowercase__ = 6
lowercase__ = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
lowercase__ = TableTransformerForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# verify our conversion
lowercase__ = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
lowercase__ = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=__magic_name__ )
lowercase__ = Image.open(__magic_name__ ).convert("RGB" )
lowercase__ = normalize(resize(__magic_name__ , __magic_name__ ) ).unsqueeze(0 )
lowercase__ = model(__magic_name__ )
if "detection" in checkpoint_url:
lowercase__ = (1, 15, 3)
lowercase__ = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
lowercase__ = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
lowercase__ = (1, 125, 7)
lowercase__ = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
lowercase__ = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
lowercase__ = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(__magic_name__ )
image_processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
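The conversion above is driven by a simple pop-and-reinsert over the checkpoint's state dict; a toy illustration of what `rename_key` does:

from collections import OrderedDict

state_dict = OrderedDict({"transformer.encoder.norm.weight": 1.0})
val = state_dict.pop("transformer.encoder.norm.weight")  # drop the old key
state_dict["encoder.layernorm.weight"] = val             # re-add under the new name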
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __lowercase , unittest.TestCase ):
_snake_case =KandinskyVaaControlnetPipeline
_snake_case =['''image_embeds''', '''negative_image_embeds''', '''hint''']
_snake_case =['''image_embeds''', '''negative_image_embeds''', '''hint''']
_snake_case =[
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case =False
@property
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ ={
"in_channels": 8,
        # out_channels is double in_channels because the model predicts both the mean and the variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ =UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.dummy_unet
UpperCAmelCase_ =self.dummy_movq
UpperCAmelCase_ =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_lowerCAmelCase , )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any]=0 ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
# create hint
UpperCAmelCase_ =floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ ="cpu"
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ =np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
UpperCAmelCase_ =torch.from_numpy(np.array(_lowerCAmelCase ) ).float() / 2_55.0
UpperCAmelCase_ =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCAmelCase_ =KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
UpperCAmelCase_ =KandinskyVaaControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ ="A robot, 4k photo"
UpperCAmelCase_ =torch.Generator(device="cuda" ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ =pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ =torch.Generator(device="cuda" ).manual_seed(0 )
UpperCAmelCase_ =pipeline(
image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , hint=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , output_type="np" , )
UpperCAmelCase_ =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 54 |
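The dummy-input helper above branches on the device because MPS has historically not accepted a device-bound `torch.Generator`; a minimal sketch of that pattern:

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # on mps, fall back to (and seed) the default CPU generator
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)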
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 | 0 |
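`_LazyModule` defers the real submodule import until an attribute is first accessed, which keeps `import transformers` cheap. An illustrative stand-in (not the actual implementation), assuming `import_structure` maps fully qualified module paths to their exported names:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {module: [attrs]} into {attr: module}
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)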
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
@add_end_docstrings(
__SCREAMING_SNAKE_CASE , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[int] ,A : GenericTensor ):
if self.framework == "tf":
__A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__A = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=A )
else:
raise ValueError("Unsupported framework" )
return masked_index
def UpperCamelCase_ ( self : Optional[Any] ,A : GenericTensor ):
__A = self.get_masked_index(A )
__A = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" ,self.model.base_model_prefix ,f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' ,)
def UpperCamelCase_ ( self : str ,A : GenericTensor ):
if isinstance(A ,A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A )
def UpperCamelCase_ ( self : List[str] ,A : int ,A : Tuple=None ,**A : List[str] ):
if return_tensors is None:
__A = self.framework
__A = self.tokenizer(A ,return_tensors=A )
self.ensure_exactly_one_mask_token(A )
return model_inputs
def UpperCamelCase_ ( self : List[str] ,A : Optional[int] ):
__A = self.model(**A )
__A = model_inputs["input_ids"]
return model_outputs
def UpperCamelCase_ ( self : Optional[Any] ,A : int ,A : str=5 ,A : Optional[int]=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__A = target_ids.shape[0]
__A = model_outputs["input_ids"][0]
__A = model_outputs["logits"]
if self.framework == "tf":
__A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__A = outputs.numpy()
__A = outputs[0, masked_index, :]
__A = stable_softmax(A ,axis=-1 )
if target_ids is not None:
__A = tf.gather_nd(tf.squeeze(A ,0 ) ,target_ids.reshape(-1 ,1 ) )
__A = tf.expand_dims(A ,0 )
__A = tf.math.top_k(A ,k=A )
__A , __A = topk.values.numpy(), topk.indices.numpy()
else:
__A = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__A = outputs[0, masked_index, :]
__A = logits.softmax(dim=-1 )
if target_ids is not None:
__A = probs[..., target_ids]
__A , __A = probs.topk(A )
__A = []
__A = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ):
__A = []
for v, p in zip(_values ,_predictions ):
# Copy is important since we're going to modify this array in place
__A = input_ids.numpy().copy()
if target_ids is not None:
__A = target_ids[p].tolist()
__A = p
# Filter padding out:
__A = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__A = self.tokenizer.decode(A ,skip_special_tokens=A )
__A = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(A )
result.append(A )
if single_mask:
return result[0]
return result
def UpperCamelCase_ ( self : Any ,A : Optional[int] ,A : int=None ):
if isinstance(A ,A ):
__A = [targets]
try:
__A = self.tokenizer.get_vocab()
except Exception:
__A = {}
__A = []
for target in targets:
__A = vocab.get(A ,A )
if id_ is None:
__A = self.tokenizer(
A ,add_special_tokens=A ,return_attention_mask=A ,return_token_type_ids=A ,max_length=1 ,truncation=A ,)["input_ids"]
if len(A ) == 0:
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
"We cannot replace it with anything meaningful, ignoring it" )
continue
__A = input_ids[0]
            # XXX: if users hit this code path it becomes pretty slow, so
            # emit the warning below to let them fix the input and get
            # faster performance.
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
__A = list(set(A ) )
if len(A ) == 0:
raise ValueError("At least one target must be provided when passed." )
__A = np.array(A )
return target_ids
def UpperCamelCase_ ( self : Any ,A : str=None ,A : List[str]=None ):
__A = {}
if targets is not None:
__A = self.get_target_ids(A ,A )
__A = target_ids
if top_k is not None:
__A = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" ,self.model.base_model_prefix ,"The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : Optional[int] ,A : Tuple ,*A : Optional[int] ,**A : str ):
__A = super().__call__(A ,**A )
if isinstance(A ,A ) and len(A ) == 1:
return outputs[0]
return outputs
| 55 |
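End to end, the preprocess/_forward/postprocess methods above are what the `fill-mask` pipeline runs; a usage sketch (the "distilroberta-base" checkpoint is an assumption, any masked-LM checkpoint with a mask token works):

from transformers import pipeline

fill = pipeline("fill-mask", model="distilroberta-base")
for pred in fill("The capital of France is <mask>.", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))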
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def _A ( __magic_name__ , __magic_name__ , __magic_name__=8 ):
lowercase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase ( lowercase_ ):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Tuple , _lowercase :List[str] , _lowercase :Tuple , _lowercase :Optional[Any] , _lowercase :int , _lowercase :str ):
'''simple docstring'''
if latents is None:
lowercase__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase__ = latents.to(_lowercase )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self :int , _lowercase :int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self :int , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :int = 5_12 , _lowercase :int = 5_12 , _lowercase :int = 1_00 , _lowercase :float = 4.0 , _lowercase :int = 1 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :Optional[torch.FloatTensor] = None , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.unet.config.in_channels
lowercase__ , lowercase__ = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {"image_embeds": image_embeds}
lowercase__ = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase__ = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["sample"]
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 0 |
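The `chunk(2)`-and-recombine step inside the denoising loop above is classifier-free guidance. Writing the unconditional and conditioned noise predictions as \epsilon_u and \epsilon_c and the guidance scale as s, the combined prediction is

    \hat{\epsilon} = \epsilon_u + s\,(\epsilon_c - \epsilon_u),

so s = 1 recovers the purely conditional prediction and larger values push the sample harder toward the image-embedding condition.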
'''simple docstring'''
def _a (lowercase__ : list , lowercase__ : list , lowercase__ : int ) -> float:
"""simple docstring"""
if len(lowercase__ ) != len(lowercase__ ):
        raise ValueError('The lengths of profit and weight must be the same.' )
if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
    # Compute the profit-per-unit-weight ratio (profit/weight) for each item.
__snake_case = [p / w for p, w in zip(lowercase__ , lowercase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
__snake_case = sorted(lowercase__ )
# declaring useful variables
__snake_case = len(lowercase__ )
__snake_case = 0
__snake_case = 0
__snake_case = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio in sorted_profit_by_weight
__snake_case = sorted_profit_by_weight[length - i - 1]
__snake_case = profit_by_weight.index(lowercase__ )
__snake_case = -1
        # check whether the remaining capacity can still fit the whole item
if max_weight - limit >= weight[index]:
limit += weight[index]
            # add the full profit for this item,
            # since weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit entirely: take only the remaining capacity
            # and add the proportional profit,
            # (max_weight - limit) / weight[index] * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
_a : str = [int(x) for x in input("Input profits separated by spaces: ").split()]
_a : str = [int(x) for x in input("Input weights separated by spaces: ").split()]
_a : Any = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 56 |
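A worked example for the greedy routine above (called `calc_profit` in the __main__ block): with profits [1, 2, 3] and weights [3, 4, 5], the profit/weight ratios are 1/3, 1/2 and 3/5, so items are taken in the order 3, 2, 1. Their total weight is 12 <= 15, every item fits whole, and the gain is 3 + 2 + 1:

assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6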
import inspect
import unittest
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
lowercase__ = inspect.getmembers(_lowercase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowercase__ = "k-diffusion"
elif backend == "invisible_watermark":
lowercase__ = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
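The check above asserts that every backend named by a dummy object maps to a pinned requirement; the table itself is a plain dict, so the lookup can be reproduced directly (assuming a diffusers install):

from diffusers.dependency_versions_table import deps

print(deps["torch"])          # a pinned specifier string, e.g. "torch>=..."
print("k-diffusion" in deps)  # the remapped backend names must be present too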
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
A_ : Tuple = 'scheduler_config.json'
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Any =1
a : Union[str, Any] =2
a : List[str] =3
a : List[str] =4
a : Dict =5
a : List[str] =6
a : str =7
a : int =8
a : Dict =9
a : int =10
a : int =11
a : int =12
a : str =13
a : List[str] =14
@dataclass
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : torch.FloatTensor
class _lowerCAmelCase:
"""simple docstring"""
a : Any =SCHEDULER_CONFIG_NAME
a : Optional[int] =[]
a : int =True
@classmethod
def _a ( cls , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=False , **_lowerCamelCase , ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Any = cls.load_config(
pretrained_model_name_or_path=_lowerCamelCase , subfolder=_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , return_commit_hash=_lowerCamelCase , **_lowerCamelCase , )
return cls.from_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = False , **_lowerCamelCase ):
self.save_config(save_directory=_lowerCamelCase , push_to_hub=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self ):
return self._get_compatibles()
@classmethod
def _a ( cls ):
UpperCamelCase_: Any = list(set([cls.__name__] + cls._compatibles ) )
UpperCamelCase_: int = importlib.import_module(__name__.split('.' )[0] )
UpperCamelCase_: str = [
getattr(_lowerCamelCase , _lowerCamelCase ) for c in compatible_classes_str if hasattr(_lowerCamelCase , _lowerCamelCase )
]
        return compatible_classes
| 57 |
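`_get_compatibles` resolves the string names listed in `_compatibles` against the top-level `diffusers` namespace; from user code the result surfaces through the `compatibles` property, e.g.:

from diffusers import DDPMScheduler

scheduler = DDPMScheduler()
print([cls.__name__ for cls in scheduler.compatibles])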
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
# setable values
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
class lowerCAmelCase ( lowercase_ , lowercase_ ):
__lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__lowerCamelCase = 42
@property
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self :str , _lowercase :int = 10_00 , _lowercase :float = 0.0001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.floataa , ):
'''simple docstring'''
lowercase__ = dtype
def UpperCAmelCase ( self :str , _lowercase :Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
lowercase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ = jnp.array(1.0 , dtype=self.dtype )
lowercase__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :Optional[int] = None ):
'''simple docstring'''
return sample
def UpperCAmelCase ( self :List[str] , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :Tuple = () ):
'''simple docstring'''
lowercase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ = (jnp.arange(0 , _lowercase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Tuple , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :List[str]=None , _lowercase :Tuple=None ):
'''simple docstring'''
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ = jnp.clip(_lowercase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ = jnp.log(jnp.clip(_lowercase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowercase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ = variance
lowercase__ = state.common.betas[t]
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self :Optional[int] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :int , _lowercase :jnp.ndarray , _lowercase :Optional[jax.random.KeyArray] = None , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(_lowercase , sample.shape[1] , axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(_lowercase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(_lowercase , num=1 )
lowercase__ = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase ) ** 0.5) * noise
lowercase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase )
def UpperCAmelCase ( self :int , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :Dict , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase )
def __len__( self :List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 655 | 0 |
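For reference, the coefficients assembled in `step` above implement the DDPM posterior mean from formula (7) of https://arxiv.org/pdf/2006.11239.pdf, and `_get_variance` computes its variance:

    \mu_t(x_t, x_0) = \frac{\sqrt{\bar\alpha_{t-1}}\,\beta_t}{1-\bar\alpha_t}\,x_0 + \frac{\sqrt{\alpha_t}\,(1-\bar\alpha_{t-1})}{1-\bar\alpha_t}\,x_t, \qquad \tilde\beta_t = \frac{1-\bar\alpha_{t-1}}{1-\bar\alpha_t}\,\beta_t

with x_0 reconstructed from the model output according to `prediction_type`.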
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''audio-spectrogram-transformer'''
def __init__( self , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=1_6 , _lowercase=True , _lowercase=1_0 , _lowercase=1_0 , _lowercase=1_0_2_4 , _lowercase=1_2_8 , **_lowercase , ) -> List[str]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Tuple = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Tuple = intermediate_size
snake_case_ : List[str] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : Dict = initializer_range
snake_case_ : List[str] = layer_norm_eps
snake_case_ : Optional[Any] = patch_size
snake_case_ : Tuple = qkv_bias
snake_case_ : Any = frequency_stride
snake_case_ : Dict = time_stride
snake_case_ : List[Any] = max_length
snake_case_ : List[str] = num_mel_bins
| 58 |
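A minimal instantiation sketch, assuming the `transformers` package (which exports this configuration as `ASTConfig`); the defaults mirror the `__init__` signature above:

from transformers import ASTConfig

config = ASTConfig()  # hidden_size=768, num_mel_bins=128, max_length=1024, ...
print(config.model_type)  # "audio-spectrogram-transformer"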
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
_snake_case = logging.get_logger(__name__)
_snake_case = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowercase_ )} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__lowerCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
__lowerCamelCase = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
__lowerCamelCase = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__lowerCamelCase = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__lowerCamelCase = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__lowerCamelCase = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
__lowerCamelCase = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'train'
__lowerCamelCase = 'dev'
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self :Optional[Any] , _lowercase :SquadDataTrainingArguments , _lowercase :PreTrainedTokenizer , _lowercase :Optional[int] = None , _lowercase :Union[str, Split] = Split.train , _lowercase :Optional[bool] = False , _lowercase :Optional[str] = None , _lowercase :Optional[str] = "pt" , ):
'''simple docstring'''
lowercase__ = args
lowercase__ = is_language_sensitive
lowercase__ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
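# SQuAD v2 adds unanswerable questions, hence a dedicated processor.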
if isinstance(_lowercase , _lowercase ):
try:
lowercase__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowercase__ = mode
# Load data features from cache or dataset file
lowercase__ = "v2" if args.version_2_with_negative else "v1"
lowercase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + ".lock"
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
lowercase__ = time.time()
lowercase__ = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowercase__ = self.old_features["features"]
lowercase__ = self.old_features.get("dataset" , _lowercase )
lowercase__ = self.old_features.get("examples" , _lowercase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
" a future run" )
else:
if mode == Split.dev:
lowercase__ = self.processor.get_dev_examples(args.data_dir )
else:
lowercase__ = self.processor.get_train_examples(args.data_dir )
lowercase__ , lowercase__ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
lowercase__ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self :Dict ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :Any , _lowercase :Any ):
'''simple docstring'''
lowercase__ = self.features[i]
lowercase__ = torch.tensor(feature.input_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowercase__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowercase__ = torch.tensor(feature.cls_index , dtype=torch.long )
lowercase__ = torch.tensor(feature.p_mask , dtype=torch.float )
lowercase__ = torch.tensor(feature.is_impossible , dtype=torch.float )
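# Inputs common to all question-answering models; model-specific tensors are added conditionally below.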
lowercase__ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowercase__ = torch.tensor(feature.start_position , dtype=torch.long )
lowercase__ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 655 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num ) -> float:
"""simple docstring"""
if num <= 0:
raise ValueError("math domain error" )
return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand( x , z ) -> float:
"""simple docstring"""
return math.pow(x , z - 1 ) * math.exp(-x )
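# Sanity check: gamma(5) should be close to 24.0, since Γ(n) = (n-1)! for positive integers.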
if __name__ == "__main__":
from doctest import testmod
testmod()
| 59 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = """▁"""
_snake_case = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
_snake_case = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
_snake_case = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
_snake_case = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
_snake_case = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = ["input_ids"]
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = RESOURCE_FILES_NAMES
def __init__( self :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=False , _lowercase :Dict="utf8" , _lowercase :Optional[Any]="[UNK]" , _lowercase :Optional[int]="[SEP]" , _lowercase :List[str]="[PAD]" , _lowercase :Dict="[CLS]" , _lowercase :Optional[Any]="[MASK]" , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Tuple , ):
'''simple docstring'''
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , vocab_file=_lowercase , encoding=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
lowercase__ = do_lower_case
lowercase__ = sentencepiece_model_ckpt
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase__ = self.load_vocab(filepath=_lowercase )
else:
lowercase__ = {self.sp_model.id_to_piece(_lowercase ): id for id in range(self.sp_model.get_piece_size() )}
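# Falls back to the sentencepiece model's own vocabulary when no vocab file is given.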
lowercase__ = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase ( self :Any , _lowercase :Dict ):
'''simple docstring'''
if text is None:
return None
lowercase__ = self.tokenize(_lowercase )
lowercase__ , lowercase__ = "", []
for i, ch in enumerate(_lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase__ = self.SP_CHAR_MAPPING.get(_lowercase )
else:
lowercase__ = unicodedata.normalize("NFKC" , _lowercase )
if self.is_whitespace(_lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowercase ) )
lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0
if self.do_lower_case:
lowercase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase__ = token[1:]
lowercase__ = text[offset:].index(_lowercase ) + offset
lowercase__ = start + len(_lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase__ = end
return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase ( self :Dict , _lowercase :int , _lowercase :Union[str, Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowercase ) + 1) + [1] * (len(_lowercase ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
def UpperCAmelCase ( self :List[str] , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
| 655 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase_ = HfApi()
lowerCAmelCase_ = {}
# fmt: off
lowerCAmelCase_ = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowerCAmelCase_ = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowerCAmelCase_ = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowerCAmelCase_ = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowerCAmelCase_ = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowerCAmelCase_ = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowerCAmelCase_ = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowerCAmelCase_ = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowerCAmelCase_ = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowerCAmelCase_ = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowerCAmelCase_ = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowerCAmelCase_ = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowerCAmelCase_ = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowerCAmelCase_ = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowerCAmelCase_ = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
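# Compare each released checkpoint's output slice against the hard-coded reference tensors above.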
lowerCAmelCase_ = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase_ = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
lowerCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowerCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase_ = torch.tensor([1_0] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase_ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :3_0], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 60 |
def sum_digits( num ):
digit_sum = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def solution( max_n = 100 ):
pre_numerator = 1
cur_numerator = 2
for i in range(2 , max_n + 1 ):
temp = pre_numerator
e_cont = 2 * i // 3 if i % 3 == 0 else 1
pre_numerator = cur_numerator
cur_numerator = e_cont * pre_numerator + temp
return sum_digits(cur_numerator )
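# For max_n=100 this is Project Euler problem 65 (digit sum of the numerator of the
# 100th convergent of the continued fraction for e); the commonly cited answer is 272.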
if __name__ == "__main__":
print(F"""{solution() = }""")
| 655 | 0 |
def _A ( lowerCAmelCase_ : str ):
"""simple docstring"""
lowerCAmelCase__ = ""
for ch in key:
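# "and" binds tighter than "or": keep spaces, or alphabetic characters not already seen.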
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _A ( lowerCAmelCase_ : str ):
"""simple docstring"""
lowerCAmelCase__ = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
lowerCAmelCase__ = remove_duplicates(key.upper() )
lowerCAmelCase__ = len(lowerCAmelCase_ )
# First fill cipher with key characters
lowerCAmelCase__ = {alphabet[i]: char for i, char in enumerate(lowerCAmelCase_ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCAmelCase_ ) , 26 ):
lowerCAmelCase__ = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
lowerCAmelCase__ = alphabet[i - offset]
lowerCAmelCase__ = char
return cipher_alphabet
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : dict[str, str] ):
"""simple docstring"""
return "".join(cipher_map.get(lowerCAmelCase_ , lowerCAmelCase_ ) for ch in message.upper() )
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : dict[str, str] ):
"""simple docstring"""
lowerCAmelCase__ = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCAmelCase_ , lowerCAmelCase_ ) for ch in message.upper() )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = input("Enter message to encode or decode: " ).strip()
lowerCAmelCase__ = input("Enter keyword: " ).strip()
lowerCAmelCase__ = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
lowerCAmelCase__ = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
lowerCAmelCase__ = create_cipher_map(lowerCAmelCase_ )
print(func(lowerCAmelCase_ , lowerCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 61 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'AutoTokenizer'
__lowerCamelCase = ['tokenizer']
__lowerCamelCase = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self :Dict , _lowercase :List[str] , _lowercase :List[Any]=None ):
'''simple docstring'''
super().__init__(_lowercase )
lowercase__ = speaker_embeddings
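# Optional mapping from voice-preset names to stored speaker-embedding files.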
@classmethod
def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowercase__ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(_lowercase , _lowercase )}` does not exist,
no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowercase__ = None
else:
with open(_lowercase ) as speaker_embeddings_json:
lowercase__ = json.load(_lowercase )
else:
lowercase__ = None
lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
lowercase__ = {}
lowercase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ = self._load_voice_preset(_lowercase )
lowercase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
lowercase__ = tmp_dict
with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
super().save_pretrained(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = self.speaker_embeddings[voice_preset]
lowercase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowercase__ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist,
no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
lowercase__ = np.load(_lowercase )
return voice_preset_dict
def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
if (
isinstance(_lowercase , _lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ = self._load_voice_preset(_lowercase )
else:
if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
lowercase__ = voice_preset + ".npz"
lowercase__ = np.load(_lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase )
lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
lowercase__ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
lowercase__ = voice_preset
return encoded_text
| 655 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : str = '''xlm-roberta'''
def __init__( self : List[Any] , UpperCAmelCase_ : List[str]=3_0522 , UpperCAmelCase_ : str=768 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : int=3072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-12 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[str]="absolute" , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : List[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : str = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : Optional[int] = classifier_dropout
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
@property
def _A ( self : Tuple ):
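# Dynamic axes for ONNX export; multiple-choice inputs carry an extra "choice" dimension between batch and sequence.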
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 62 |
import math
import random
def sigmoid_function( value , deriv = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected , number_propagations ):
weight = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(number_propagations ):
# Forward propagation
layer_a = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
layer_1_error = (expected / 100) - layer_a
# Error delta
layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("""Expected value: """))
_snake_case = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 655 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class a ( lowercase__ , lowercase__ ):
"""simple docstring"""
a : Dict = 1
@register_to_config
def __init__( self : int , __lowercase : int = 1000 , __lowercase : Optional[Union[np.ndarray, List[float]]] = None ) -> Union[str, Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__lowercase )
# standard deviation of the initial noise distribution
__UpperCAmelCase : List[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase : List[Any] = 4
# running values
__UpperCAmelCase : str = []
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : int , __lowercase : Union[str, torch.device] = None ) -> int:
__UpperCAmelCase : int = num_inference_steps
__UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__UpperCAmelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__UpperCAmelCase : Dict = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
__UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
__UpperCAmelCase : Tuple = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
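# Each (beta, alpha) pair is collapsed to a scalar timestep via atan2 and rescaled to [0, 1].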
__UpperCAmelCase : Dict = timesteps.to(__lowercase )
__UpperCAmelCase : Optional[Any] = []
def UpperCAmelCase ( self : Optional[int] , __lowercase : torch.FloatTensor , __lowercase : int , __lowercase : torch.FloatTensor , __lowercase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__UpperCAmelCase : List[str] = (self.timesteps == timestep).nonzero().item()
__UpperCAmelCase : Optional[Any] = timestep_index + 1
__UpperCAmelCase : List[str] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__lowercase )
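# Linear multistep update: combine up to the last four model outputs with Adams-Bashforth-style coefficients.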
if len(self.ets ) == 1:
__UpperCAmelCase : Tuple = self.ets[-1]
elif len(self.ets ) == 2:
__UpperCAmelCase : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__UpperCAmelCase : Union[str, Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__UpperCAmelCase : List[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__UpperCAmelCase : Union[str, Any] = self._get_prev_sample(__lowercase , __lowercase , __lowercase , __lowercase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowercase )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : torch.FloatTensor , *__lowercase : Optional[Any] , **__lowercase : Any ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict ) -> str:
__UpperCAmelCase : int = self.alphas[timestep_index]
__UpperCAmelCase : Tuple = self.betas[timestep_index]
__UpperCAmelCase : Any = self.alphas[prev_timestep_index]
__UpperCAmelCase : List[str] = self.betas[prev_timestep_index]
__UpperCAmelCase : List[str] = (sample - sigma * ets) / max(__lowercase , 1e-8 )
__UpperCAmelCase : List[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple ) -> str:
return self.config.num_train_timesteps
| 63 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'van'
def __init__( self :Optional[Any] , _lowercase :Dict=2_24 , _lowercase :Union[str, Any]=3 , _lowercase :List[Any]=[7, 3, 3, 3] , _lowercase :Any=[4, 2, 2, 2] , _lowercase :Union[str, Any]=[64, 1_28, 3_20, 5_12] , _lowercase :List[Any]=[3, 3, 12, 3] , _lowercase :Dict=[8, 8, 4, 4] , _lowercase :int="gelu" , _lowercase :List[Any]=0.02 , _lowercase :List[Any]=1e-6 , _lowercase :Any=1e-2 , _lowercase :int=0.0 , _lowercase :int=0.0 , **_lowercase :Dict , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = strides
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = mlp_ratios
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = dropout_rate
| 655 | 0 |
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector( end_point_a : Pointad , end_point_b : Pointad ) -> Vectorad:
x = end_point_b[0] - end_point_a[0]
y = end_point_b[1] - end_point_a[1]
z = end_point_b[2] - end_point_a[2]
return (x, y, z)
def get_ad_vectors_cross( ab : Vectorad , ac : Vectorad ) -> Vectorad:
x = ab[1] * ac[2] - ab[2] * ac[1] # *i
y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
z = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def is_zero_vector( vector : Vectorad , accuracy : int ) -> bool:
return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear( point_a : Pointad , point_b : Pointad , point_c : Pointad , accuracy : int = 10 ) -> bool:
ab = create_vector(point_a , point_b )
ac = create_vector(point_a , point_c )
return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
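# Quick sanity check: are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) should return True.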
| 64 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( enum.Enum ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
'''simple docstring'''
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowercase__ = None
if self.model.config.prefix is not None:
lowercase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowercase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
lowercase__ = {**self._preprocess_params, **preprocess_params}
lowercase__ = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
'''simple docstring'''
return super().__call__(_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prompt_text
if handle_long_generation == "hole":
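# "hole" strategy: drop tokens from the left of the prompt so that prompt + new tokens still fit the model's maximum length.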
lowercase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowercase__ = generate_kwargs["max_new_tokens"]
else:
lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowercase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowercase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
'''simple docstring'''
lowercase__ = model_inputs["input_ids"]
lowercase__ = model_inputs.get("attention_mask" , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowercase__ = None
lowercase__ = None
lowercase__ = 1
else:
lowercase__ = input_ids.shape[0]
lowercase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowercase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowercase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
lowercase__ = generated_sequence.shape[0]
if self.framework == "pt":
lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
'''simple docstring'''
lowercase__ = model_outputs["generated_sequence"][0]
lowercase__ = model_outputs["input_ids"]
lowercase__ = model_outputs["prompt_text"]
lowercase__ = generated_sequence.numpy().tolist()
lowercase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowercase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowercase__ = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowercase__ = 0
else:
lowercase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
lowercase__ = prompt_text + text[prompt_length:]
else:
lowercase__ = text[prompt_length:]
lowercase__ = {"generated_text": all_text}
records.append(_lowercase )
return records
| 655 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
UpperCAmelCase__ : List[Any] = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__UpperCamelCase )
# Let's go
UpperCAmelCase__ : int = parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
UpperCAmelCase__ : Union[str, Any] = args.func(__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 65 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _A ( __magic_name__ ):
lowercase__ = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__magic_name__ )[0]
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(rows * cols * num_images )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
lowercase__ = data.reshape(__magic_name__ , __magic_name__ , __magic_name__ , 1 )
return data
@deprecated(__magic_name__ , "Please use tf.one_hot on tensors." )
def _A ( __magic_name__ , __magic_name__ ):
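# Writes a 1 at position (row, label) for every row via flat indexing.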
lowercase__ = labels_dense.shape[0]
lowercase__ = numpy.arange(__magic_name__ ) * num_classes
lowercase__ = numpy.zeros((num_labels, num_classes) )
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
@deprecated(__magic_name__ , "Please use tf.data to implement this functionality." )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__magic_name__ ) as bytestream:
lowercase__ = _readaa(__magic_name__ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowercase__ = _readaa(__magic_name__ )
lowercase__ = bytestream.read(__magic_name__ )
lowercase__ = numpy.frombuffer(__magic_name__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__magic_name__ , __magic_name__ )
return labels
class lowerCAmelCase :
@deprecated(
_lowercase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self :List[str] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple=False , _lowercase :str=False , _lowercase :Dict=dtypes.floataa , _lowercase :Optional[Any]=True , _lowercase :Any=None , ):
'''simple docstring'''
lowercase__ , lowercase__ = random_seed.get_seed(_lowercase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowercase__ = dtypes.as_dtype(_lowercase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
lowercase__ = 1_00_00
lowercase__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
lowercase__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase__ = images.astype(numpy.floataa )
lowercase__ = numpy.multiply(_lowercase , 1.0 / 255.0 )
lowercase__ = images
lowercase__ = labels
lowercase__ = 0
lowercase__ = 0
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._images
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return self._labels
@property
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return self._num_examples
@property
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return self._epochs_completed
def UpperCAmelCase ( self :str , _lowercase :Union[str, Any] , _lowercase :Any=False , _lowercase :Union[str, Any]=True ):
'''simple docstring'''
if fake_data:
lowercase__ = [1] * 7_84
lowercase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_lowercase )],
[fake_label for _ in range(_lowercase )],
)
lowercase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perma]
lowercase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ = self._num_examples - start
lowercase__ = self._images[start : self._num_examples]
lowercase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(_lowercase )
lowercase__ = self.images[perm]
lowercase__ = self.labels[perm]
# Start next epoch
lowercase__ = 0
lowercase__ = batch_size - rest_num_examples
lowercase__ = self._index_in_epoch
lowercase__ = self._images[start:end]
lowercase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
__magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
lowercase__ = fake()
lowercase__ = fake()
lowercase__ = fake()
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
if not source_url: # empty string check
lowercase__ = DEFAULT_SOURCE_URL
lowercase__ = "train-images-idx3-ubyte.gz"
lowercase__ = "train-labels-idx1-ubyte.gz"
lowercase__ = "t10k-images-idx3-ubyte.gz"
lowercase__ = "t10k-labels-idx1-ubyte.gz"
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + train_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_images_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_images(__magic_name__ )
lowercase__ = _maybe_download(
__magic_name__ , __magic_name__ , source_url + test_labels_file )
with gfile.Open(__magic_name__ , "rb" ) as f:
lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
if not 0 <= validation_size <= len(__magic_name__ ):
lowercase__ = (
"Validation size should be between 0 and "
f'''{len(__magic_name__ )}. Received: {validation_size}.'''
)
raise ValueError(__magic_name__ )
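# Carve the validation set off the front of the training data.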
lowercase__ = train_images[:validation_size]
lowercase__ = train_labels[:validation_size]
lowercase__ = train_images[validation_size:]
lowercase__ = train_labels[validation_size:]
lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
| 655 | 0 |
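A minimal, self-contained sketch of the wrap-around batching technique used in `next_batch` above. `SimpleBatcher` and its attribute names are illustrative assumptions, not part of the original TensorFlow tutorial code:

import numpy as np

class SimpleBatcher:
    """Hypothetical minimal batcher showing the epoch wrap-around logic."""

    def __init__(self, images, labels, shuffle=True):
        self._images = images
        self._labels = labels
        self._num_examples = images.shape[0]
        self._index_in_epoch = 0
        self._epochs_completed = 0
        self._shuffle = shuffle

    def next_batch(self, batch_size):
        start = self._index_in_epoch
        if start + batch_size > self._num_examples:
            # Finished epoch: take the remainder, reshuffle, then top up from the new epoch.
            self._epochs_completed += 1
            images_rest, labels_rest = self._images[start:], self._labels[start:]
            if self._shuffle:
                perm = np.random.permutation(self._num_examples)
                self._images, self._labels = self._images[perm], self._labels[perm]
            self._index_in_epoch = batch_size - len(images_rest)
            end = self._index_in_epoch
            return (np.concatenate((images_rest, self._images[:end])),
                    np.concatenate((labels_rest, self._labels[:end])))
        self._index_in_epoch += batch_size
        return self._images[start:self._index_in_epoch], self._labels[start:self._index_in_epoch]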
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ) -> List[str]:
if model_type == "text":
_lowercase : Optional[int] = BarkSemanticModel
_lowercase : Any = BarkSemanticConfig
_lowercase : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
_lowercase : List[Any] = BarkCoarseModel
_lowercase : Optional[Any] = BarkCoarseConfig
_lowercase : Tuple = BarkCoarseGenerationConfig
elif model_type == "fine":
_lowercase : int = BarkFineModel
_lowercase : Tuple = BarkFineConfig
_lowercase : Optional[Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
_lowercase : int = F"""{model_type}_small""" if use_small else model_type
_lowercase : Union[str, Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(SCREAMING_SNAKE_CASE ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
_lowercase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
# this is a hack
_lowercase : Optional[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
_lowercase : str = model_args['vocab_size']
_lowercase : Dict = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_lowercase : Tuple = model_args.pop('n_head' )
_lowercase : List[str] = model_args.pop('n_embd' )
_lowercase : Optional[int] = model_args.pop('n_layer' )
_lowercase : Union[str, Any] = ConfigClass(**checkpoint['model_args'] )
_lowercase : Union[str, Any] = ModelClass(config=SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = GenerationConfigClass()
_lowercase : Optional[int] = model_generation_config
_lowercase : Optional[int] = checkpoint['model']
# fixup checkpoint
_lowercase : Union[str, Any] = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
_lowercase : Any = k[len(SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
_lowercase : List[str] = new_k.replace(SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
_lowercase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = set(state_dict.keys() ) - set(model.state_dict().keys() )
_lowercase : Optional[Any] = {k for k in extra_keys if not k.endswith('.attn.bias' )}
_lowercase : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() )
_lowercase : str = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE )
_lowercase : int = checkpoint['best_val_loss'].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE , 3 )} loss""" )
model.eval()
model.to(SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ) -> List[str]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_lowercase : Dict = 'cpu' # do conversion on cpu
_lowercase : List[str] = _get_ckpt_path(SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
_lowercase : Dict = _load_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
# load bark initial model
_lowercase : str = _bark_load_model(SCREAMING_SNAKE_CASE , 'cpu' , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
if model_type == "text":
_lowercase : Any = bark_model['model']
if model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
_lowercase : Any = 5
_lowercase : Optional[Any] = 10
if model_type in ["text", "coarse"]:
_lowercase : Optional[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_lowercase : Tuple = bark_model(SCREAMING_SNAKE_CASE )[0]
_lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE )
# take last logits
_lowercase : Any = output_new_model_total.logits[:, [-1], :]
else:
_lowercase : Optional[Any] = 3
_lowercase : List[Any] = 8
_lowercase : Dict = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_lowercase : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : int = bark_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any:
_lowercase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Dict = BarkSemanticConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , 'config.json' ) )
_lowercase : int = BarkCoarseConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , 'config.json' ) )
_lowercase : str = BarkFineConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , 'config.json' ) )
_lowercase : List[Any] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
_lowercase : Any = BarkSemanticModel.from_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = BarkCoarseModel.from_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = BarkFineModel.from_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : int = EncodecModel.from_pretrained('facebook/encodec_24khz' )
_lowercase : Tuple = BarkConfig.from_sub_model_configs(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_lowercase : Dict = BarkModel(SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = semantic
_lowercase : Any = coarseAcoustic
_lowercase : Optional[Any] = fineAcoustic
_lowercase : Dict = codec
_lowercase : List[Any] = bark_generation_config
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
bark.save_pretrained(SCREAMING_SNAKE_CASE , repo_id=SCREAMING_SNAKE_CASE , push_to_hub=SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 66 |
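The core of the conversion script above is renaming checkpoint keys into the Hugging Face layout. A standalone sketch of that remapping step; the `rename_map` shown in the example comment is a small illustrative subset, not the full Bark mapping:

def remap_state_dict(state_dict, rename_map, unwanted_prefix="_orig_mod."):
    """Strip a compile-time prefix and apply substring renames, as in the script above."""
    remapped = {}
    for key, value in state_dict.items():
        if key.startswith(unwanted_prefix):
            key = key[len(unwanted_prefix):]
        for old, new in rename_map.items():
            key = key.replace(old, new)
        remapped[key] = value
    return remapped

# Example with rename_map = {"transformer.": "", "h.": "layers.", "ln_1": "layernorm_1"}:
# {"_orig_mod.transformer.h.0.ln_1.weight": t} -> {"layers.0.layernorm_1.weight": t}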
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a linked list from the elements of the given sequence."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the linked list in reverse order, using recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 655 | 0 |
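A short usage check for the helpers above (expected output shown in comments):

head = make_linked_list([1, 2, 3])
print(head)            # 1->2->3
print_reverse(head)    # prints 3, then 2, then 1, one per line
print_reverse(None)    # no output: the isinstance guard stops the recursion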
snake_case = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
} | 67 |
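A pin table like this is typically consumed by setup tooling. A hedged sketch of a lookup helper over the dict above (the helper name `deps_list` mirrors the convention in similar repos but is an assumption here; `snake_case` is the dict's name as it appears above):

def deps_list(*pkgs):
    """Look up pinned requirement strings for the given package names."""
    return [snake_case[pkg] for pkg in pkgs]

print(deps_list("torch", "transformers"))
# ['torch>=1.4', 'transformers>=4.25.1']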
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * (2**exp) with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 655 | 0 |
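`bin_exp_mod` is imported but not shown above. A minimal square-and-multiply version consistent with how it is called; the exact signature of the original helper is an assumption:

def bin_exp_mod(a, n, mod):
    """Compute (a ** n) % mod by binary exponentiation in O(log n) multiplications."""
    result = 1
    a %= mod
    while n > 0:
        if n & 1:
            result = result * a % mod
        a = a * a % mod
        n >>= 1
    return result

# Equivalent to Python's built-in three-argument pow: pow(a, n, mod)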
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ) -> List[Any]:
__UpperCAmelCase =[]
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : List[str] ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
def _a ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase =[]
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
bs, arga = mock_training_loop_function("hello")
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def _a ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Any ):
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _a ( self : str ) -> Dict:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : List[Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _a ( self : Optional[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def _a ( self : Any ) -> Tuple:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Tuple ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def _a ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase =torch.cuda.memory_allocated()
__UpperCAmelCase =ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =release_memory(__SCREAMING_SNAKE_CASE )
self.assertEqual(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
| 68 |
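A hedged sketch of the halving retry loop these tests exercise. This is a simplification: the real `accelerate` decorator also clears CUDA caches between attempts and inspects non-CUDA memory errors:

import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    """Retry `function(batch_size, ...)` with halved batch sizes on CUDA OOM."""
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "CUDA out of memory" in str(e):
                        batch_size //= 2  # halve and retry
                    else:
                        raise
        return wrapper
    return decorator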
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["prompt"]
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
if "image" in inputs:
lowercase__ = inputs["image"]
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs["mask_image"]
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs["original_image"]
else:
lowercase__ = None
lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = inputs["generator"]
lowercase__ = inputs["num_inference_steps"]
lowercase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe_loaded(**_lowercase )[0]
lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase , 1e-4 )
| 655 | 0 |
def get_highest_set_bit_position(number: int) -> int:
    """Returns the 1-based position of the highest set bit (0 for zero)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 69 |
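For non-negative integers the function above agrees with Python's built-in bit_length; a quick check:

assert all(get_highest_set_bit_position(i) == i.bit_length() for i in range(1024))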
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
lowercase__ = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
lowercase__ = model(_lowercase )["last_hidden_state"]
lowercase__ = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
lowercase__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 655 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCamelCase : Optional[int] = ["gpt2"]
lowerCamelCase : Any = "gpt2"
if is_tf_available():
class A( tf.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , A_ : List[Any] ) -> Tuple:
"""simple docstring"""
super().__init__()
lowerCamelCase_ = tokenizer
lowerCamelCase_ = AutoConfig.from_pretrained(A_ )
lowerCamelCase_ = TFGPTaLMHeadModel.from_config(A_ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def a__ ( self : Tuple , A_ : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer(A_ )
lowerCamelCase_ = tokenized['input_ids'].to_tensor()
lowerCamelCase_ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCamelCase_ = self.model(input_ids=A_ , attention_mask=A_ )['logits']
return outputs
@require_tf
@require_keras_nlp
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [GPTaTokenizer.from_pretrained(A_ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowerCamelCase_ = [TFGPTaTokenizer.from_pretrained(A_ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCamelCase_ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
lowerCamelCase_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowerCamelCase_ = tokenizer([test_inputs] , return_tensors='tf' )
lowerCamelCase_ = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCamelCase_ = python_outputs[key].numpy()
lowerCamelCase_ = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(A_ , tf.intaa ) == tf_outputs_values ) )
@slow
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf.function(A_ )
for test_inputs in self.test_sentences:
lowerCamelCase_ = tf.constant(A_ )
lowerCamelCase_ = compiled_tokenizer(A_ )
lowerCamelCase_ = tf_tokenizer(A_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = ModelToSave(tokenizer=A_ )
lowerCamelCase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCamelCase_ = model.serving(A_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase_ = Path(A_ ) / 'saved.model'
tf.saved_model.save(A_ , A_ , signatures={'serving_default': model.serving} )
lowerCamelCase_ = tf.saved_model.load(A_ )
lowerCamelCase_ = loaded_model.signatures['serving_default'](A_ )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCamelCase_ = tf_tokenizer(A_ ) # Build model with some sample inputs
lowerCamelCase_ = tf_tokenizer.get_config()
lowerCamelCase_ = TFGPTaTokenizer.from_config(A_ )
lowerCamelCase_ = model_from_config(A_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowerCamelCase_ = 123123
for max_length in [3, 5, 1024]:
lowerCamelCase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCamelCase_ = tf_tokenizer(A_ , max_length=A_ )
lowerCamelCase_ = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 70 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
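A quick round-trip check against the standard library, assuming the reconstructed names `base64_encode`/`base64_decode` above:

import base64

payload = b"Hello, base64!"
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload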
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _snake_case (enum.Enum):
__A : Tuple =0
__A : List[Any] =1
__A : Optional[int] =2
@add_end_docstrings(__SCREAMING_SNAKE_CASE)
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : str ="\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self ,*_snake_case ,**_snake_case ):
super().__init__(*_snake_case ,**_snake_case )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : List[Any] = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : int = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._sanitize_parameters(prefix=_snake_case ,**self._forward_params )
UpperCAmelCase_ : Optional[Any] = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : Tuple = {**self._forward_params, **forward_params}
def UpperCamelCase__ ( self ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,**_snake_case ,):
UpperCAmelCase_ : Tuple = {}
if prefix is not None:
UpperCAmelCase_ : Tuple = prefix
if prefix:
UpperCAmelCase_ : Tuple = self.tokenizer(
_snake_case ,padding=_snake_case ,add_special_tokens=_snake_case ,return_tensors=self.framework )
UpperCAmelCase_ : Dict = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
UpperCAmelCase_ : Dict = handle_long_generation
preprocess_params.update(_snake_case )
UpperCAmelCase_ : List[str] = generate_kwargs
UpperCAmelCase_ : List[str] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : int = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : Dict = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : Union[str, Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : int = self.tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
if len(_snake_case ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
UpperCAmelCase_ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_snake_case ,**_snake_case )
def __call__( self ,_snake_case ,**_snake_case ):
return super().__call__(_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="" ,_snake_case=None ,**_snake_case ):
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text ,padding=_snake_case ,add_special_tokens=_snake_case ,return_tensors=self.framework )
UpperCAmelCase_ : int = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ : Optional[int] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : List[str] = generate_kwargs["max_new_tokens"]
else:
UpperCAmelCase_ : int = generate_kwargs.get("max_length" ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
UpperCAmelCase_ : List[str] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Optional[int] = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCamelCase__ ( self ,_snake_case ,**_snake_case ):
UpperCAmelCase_ : Optional[int] = model_inputs["input_ids"]
UpperCAmelCase_ : Optional[Any] = model_inputs.get("attention_mask" ,_snake_case )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Dict = 1
else:
UpperCAmelCase_ : Optional[int] = input_ids.shape[0]
UpperCAmelCase_ : Tuple = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase_ : Tuple = generate_kwargs.pop("prefix_length" ,0 )
if prefix_length > 0:
UpperCAmelCase_ : Tuple = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase_ : Any = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase_ : int = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase_ : int = self.model.generate(input_ids=_snake_case ,attention_mask=_snake_case ,**_snake_case )
UpperCAmelCase_ : int = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase_ : Optional[Any] = generated_sequence.reshape(_snake_case ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ : Any = tf.reshape(_snake_case ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=ReturnType.FULL_TEXT ,_snake_case=True ):
UpperCAmelCase_ : int = model_outputs["generated_sequence"][0]
UpperCAmelCase_ : int = model_outputs["input_ids"]
UpperCAmelCase_ : List[Any] = model_outputs["prompt_text"]
UpperCAmelCase_ : Optional[int] = generated_sequence.numpy().tolist()
UpperCAmelCase_ : str = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ : Dict = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ : Any = self.tokenizer.decode(
_snake_case ,skip_special_tokens=_snake_case ,clean_up_tokenization_spaces=_snake_case ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase_ : str = 0
else:
UpperCAmelCase_ : List[str] = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=_snake_case ,clean_up_tokenization_spaces=_snake_case ,) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ : List[str] = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ : Tuple = text[prompt_length:]
UpperCAmelCase_ : Tuple = {"generated_text": all_text}
records.append(_snake_case )
return records
| 71 |
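Typical call-site usage of the pipeline class defined above, via the public `pipeline` factory (the model name is illustrative and will be downloaded on first use):

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator("Once upon a time", max_new_tokens=20, return_full_text=False)
print(out[0]["generated_text"])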
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))
def UpperCAmelCase ( self :Optional[int] , _lowercase :Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowercase__ = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
lowercase__ = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == "mps"
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.floataa if is_mps else torch.floataa
else:
lowercase__ = torch.intaa if is_mps else torch.intaa
lowercase__ = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 0 |
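The guidance step in the loop above reduces to a small formula. A self-contained sketch, assuming the conditional half of the batch comes first as in the pipeline above:

import torch

def classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """Combine conditional/unconditional halves of a batched prediction."""
    cond, uncond = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
    guided = uncond + guidance_scale * (cond - uncond)
    return torch.cat([guided, guided], dim=0)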
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 42
UpperCamelCase__ = True
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = -1_00
UpperCamelCase__ = "pt"
def _A( self , snake_case_ ):
import torch
lowercase ='''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase =[feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase =self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
lowercase =torch.tensor(batch['''entity_ids'''] ).shape[1]
lowercase =self.tokenizer.padding_side
if padding_side == "right":
lowercase =[
list(snake_case_ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) for label in labels
]
else:
lowercase =[
[self.label_pad_token_id] * (sequence_length - len(snake_case_ )) + list(snake_case_ ) for label in labels
]
lowercase =[feature['''ner_tags'''] for feature in features]
lowercase =padding_tensor(snake_case_ , -1 , snake_case_ , snake_case_ )
lowercase =[feature['''original_entity_spans'''] for feature in features]
lowercase =padding_tensor(snake_case_ , (-1, -1) , snake_case_ , snake_case_ )
lowercase ={k: torch.tensor(snake_case_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 72 |
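A quick illustration of `padding_tensor` above, hedged on the reconstructed argument order `(sequences, padding_value, padding_side, sequence_length)`:

rows = [[1, 2, 3], [4]]
print(padding_tensor(rows, -100, "right", 4))
# [[1, 2, 3, -100], [4, -100, -100, -100]]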
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( lowercase_ ):
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = SMALL_MODEL_IDENTIFIER
lowercase__ = "pt"
lowercase__ = "tf"
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowercase )
def UpperCAmelCase ( self :Tuple , _lowercase :int ):
'''simple docstring'''
lowercase__ = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowercase )
model_tf.save_pretrained(_lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
| 655 | 0 |
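A hedged sketch of the checkpoint-based detection these tests cover. The filenames follow the usual Hugging Face conventions, and the helper name is an assumption, not the library's API:

import os

def guess_framework(checkpoint_dir: str) -> str:
    """Guess 'pt' or 'tf' from which weight files a local checkpoint contains."""
    files = set(os.listdir(checkpoint_dir))
    if "pytorch_model.bin" in files or "model.safetensors" in files:
        return "pt"
    if "tf_model.h5" in files:
        return "tf"
    raise FileNotFoundError(f"Cannot determine framework from {checkpoint_dir!r}")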
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 73 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GitConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
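
# Usage sketch (my addition): the nested vision config round-trips through
# `to_dict`, e.g.
#     cfg = GitConfig()
#     cfg.to_dict()["vision_config"]["image_size"]  # -> 224 (the default above)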
| 655 | 0 |
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with a single file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
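
# Usage sketch (my addition): once these classes are registered with fsspec, a
# compressed file can be read through fsspec's URL chaining, e.g.
#     import fsspec
#     with fsspec.open("gzip://file.txt::https://foo.bar/file.txt.gz", "rt") as f:
#         text = f.read()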
| 74 |
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
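
# Usage sketch (my addition): the cross-framework loading pattern exercised above:
#     tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT -> TF
#     pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF -> PT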
| 655 | 0 |
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
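
# Usage sketch (my addition): these module-level entry points are what torch.hub
# resolves, e.g.
#     import torch
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")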
| 75 |
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
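
# Illustrative sketch (my addition, not part of the original script): PyTorch's fused
# `in_proj_weight` stacks the query, key and value projections along dim 0, so for a
# hidden size of 256 the slices [:256], [256:512] and [-256:] recover q, k and v.
def _demo_qkv_split(in_proj_weight, hidden_size=256):
    q_proj = in_proj_weight[:hidden_size, :]
    k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v_proj = in_proj_weight[-hidden_size:, :]
    return q_proj, k_proj, v_proj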
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="blip_text_model"
def __init__( self , UpperCamelCase_=3_05_24 , UpperCamelCase_=7_68 , UpperCamelCase_=7_68 , UpperCamelCase_=30_72 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=8 , UpperCamelCase_=5_12 , UpperCamelCase_="gelu" , UpperCamelCase_=1E-12 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3_05_22 , UpperCamelCase_=2 , UpperCamelCase_=0 , UpperCamelCase_=1_02 , UpperCamelCase_=True , UpperCamelCase_=True , **UpperCamelCase_ , ) -> Dict:
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , sep_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
__lowercase : Dict = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : List[str] = encoder_hidden_size
__lowercase : Optional[Any] = intermediate_size
__lowercase : str = projection_dim
__lowercase : List[str] = hidden_dropout_prob
__lowercase : Tuple = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Tuple = max_position_embeddings
__lowercase : List[str] = layer_norm_eps
__lowercase : List[str] = hidden_act
__lowercase : List[Any] = initializer_range
__lowercase : str = attention_probs_dropout_prob
__lowercase : Union[str, Any] = is_decoder
__lowercase : Optional[Any] = use_cache
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase_ )
__lowercase ,__lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__lowercase : Tuple = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="blip_vision_model"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=30_72 , UpperCamelCase_=5_12 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3_84 , UpperCamelCase_=16 , UpperCamelCase_="gelu" , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=1E-10 , **UpperCamelCase_ , ) -> int:
super().__init__(**UpperCamelCase_ )
__lowercase : Dict = hidden_size
__lowercase : Tuple = intermediate_size
__lowercase : Any = projection_dim
__lowercase : Tuple = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : Union[str, Any] = patch_size
__lowercase : Tuple = image_size
__lowercase : Optional[int] = initializer_range
__lowercase : int = attention_dropout
__lowercase : List[Any] = layer_norm_eps
__lowercase : List[str] = hidden_act
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase_ )
__lowercase ,__lowercase : Optional[Any] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__lowercase : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="blip"
UpperCamelCase =True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=5_12 , UpperCamelCase_=2.6_5_9_2 , UpperCamelCase_=2_56 , **UpperCamelCase_ , ) -> Optional[Any]:
super().__init__(**UpperCamelCase_ )
if text_config is None:
__lowercase : Tuple = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
__lowercase : Optional[Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
__lowercase : Tuple = BlipTextConfig(**UpperCamelCase_ )
__lowercase : Optional[int] = BlipVisionConfig(**UpperCamelCase_ )
__lowercase : str = self.vision_config.hidden_size
__lowercase : List[str] = projection_dim
__lowercase : Optional[Any] = logit_scale_init_value
__lowercase : Any = 1.0
__lowercase : str = 0.0_2
__lowercase : int = image_text_hidden_size
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : str = copy.deepcopy(self.__dict__ )
__lowercase : Union[str, Any] = self.text_config.to_dict()
__lowercase : Tuple = self.vision_config.to_dict()
__lowercase : List[Any] = self.__class__.model_type
return output
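
# Usage sketch (my addition): compose a BlipConfig from sub-configs; note that the
# text encoder's cross-attention width is tied to the vision hidden size.
#     cfg = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
#     cfg.text_config.encoder_hidden_size  # -> 768, copied from the vision config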
| 76 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
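
# Usage sketch (my addition): `_LazyModule` replaces this module in `sys.modules`
# and defers the real import until an attribute is first accessed, so
#     from transformers.models.byt5 import ByT5Tokenizer
# only pays the import cost of the tokenizer module on first use.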
| 655 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A = logging.get_logger(__name__)
A = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class a__ ( __magic_name__ ):
lowercase_ = "conditional_detr"
lowercase_ = ["past_key_values"]
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : int=300 , UpperCamelCase_ : List[Any]=6 , UpperCamelCase_ : Optional[int]=2048 , UpperCamelCase_ : str=8 , UpperCamelCase_ : Tuple=6 , UpperCamelCase_ : int=2048 , UpperCamelCase_ : Optional[Any]=8 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=0.0 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : int="relu" , UpperCamelCase_ : Optional[int]=256 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Union[str, Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : str=1.0 , UpperCamelCase_ : Any=False , UpperCamelCase_ : Union[str, Any]="sine" , UpperCamelCase_ : Tuple="resnet50" , UpperCamelCase_ : str=True , UpperCamelCase_ : Any=False , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : str=5 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=1 , UpperCamelCase_ : str=1 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=5 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[int]=0.25 , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
__UpperCAmelCase : Optional[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(UpperCamelCase_ , UpperCamelCase_):
__UpperCAmelCase : int = backbone_config.get("model_type")
__UpperCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : int = config_class.from_dict(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = use_timm_backbone
__UpperCAmelCase : Optional[int] = backbone_config
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : Optional[int] = num_queries
__UpperCAmelCase : List[Any] = d_model
__UpperCAmelCase : int = encoder_ffn_dim
__UpperCAmelCase : Optional[int] = encoder_layers
__UpperCAmelCase : Optional[int] = encoder_attention_heads
__UpperCAmelCase : List[Any] = decoder_ffn_dim
__UpperCAmelCase : Optional[Any] = decoder_layers
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : Union[str, Any] = dropout
__UpperCAmelCase : Optional[int] = attention_dropout
__UpperCAmelCase : List[Any] = activation_dropout
__UpperCAmelCase : Tuple = activation_function
__UpperCAmelCase : List[Any] = init_std
__UpperCAmelCase : List[str] = init_xavier_std
__UpperCAmelCase : Dict = encoder_layerdrop
__UpperCAmelCase : Optional[int] = decoder_layerdrop
__UpperCAmelCase : int = encoder_layers
__UpperCAmelCase : Optional[int] = auxiliary_loss
__UpperCAmelCase : Optional[int] = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : List[Any] = use_pretrained_backbone
__UpperCAmelCase : Optional[Any] = dilation
# Hungarian matcher
__UpperCAmelCase : List[str] = class_cost
__UpperCAmelCase : Optional[int] = bbox_cost
__UpperCAmelCase : Dict = giou_cost
# Loss coefficients
__UpperCAmelCase : Dict = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : str = cls_loss_coefficient
__UpperCAmelCase : int = bbox_loss_coefficient
__UpperCAmelCase : str = giou_loss_coefficient
__UpperCAmelCase : List[Any] = focal_alpha
super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_)
@property
def a_ ( self : Union[str, Any]):
"""simple docstring"""
return self.encoder_attention_heads
@property
def a_ ( self : int):
"""simple docstring"""
return self.d_model
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : Tuple = self.__class__.model_type
return output
class a__ ( __magic_name__ ):
lowercase_ = version.parse("1.11" )
@property
def a_ ( self : Optional[Any]):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
])
@property
def a_ ( self : Tuple):
"""simple docstring"""
return 1e-5
@property
def a_ ( self : Tuple):
"""simple docstring"""
return 12
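
# Usage sketch (my addition): the attribute map lets generic code read
# `config.hidden_size` even though the stored field is `d_model`, e.g.
#     cfg = ConditionalDetrConfig()
#     cfg.hidden_size  # -> 256, an alias of cfg.d_model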
| 77 |
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
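
# Worked example (my addition): with the default scale factor of 8, a 768x768
# request maps to a 96x96 latent grid (768 // 64 = 12, times 8 = 96), and any
# size that is not a multiple of 64 is rounded up to the next multiple:
#     downscale_height_and_width(768, 768)   # -> (96, 96)
#     downscale_height_and_width(1024, 760)  # -> (128, 96)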
class KandinskyV22Pipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 image embeddings:
    a conditional U-Net denoises the latents and a MoVQ decoder turns them into images.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 655 | 0 |
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
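
# Worked example (my addition): with 2 processes, rank 0 holds a (2, 10) tensor and
# rank 1 a (3, 10) tensor, so `pad_across_processes` pads both to (3, 10) by
# appending zero rows at the end (or at the front when `pad_first=True`).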
| 78 |
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
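
# Usage sketch (my addition): dummy objects advertise the backends they stand in
# for via a `_backends` class attribute, e.g.
#     from diffusers.utils import dummy_torch_and_transformers_objects
#     dummy_torch_and_transformers_objects.StableDiffusionPipeline._backends
#     # -> ["torch", "transformers"]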
| 655 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where a word is represented
    as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
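
# Worked example (my addition): `get_pairs` yields the adjacent-symbol bigrams that
# BPE ranks are looked up against, e.g.
#     get_pairs(("h", "e", "l", "l", "o"))
#     # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}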
# Speech2Text2 has no max input length
SCREAMING_SNAKE_CASE__ : Any = {"""facebook/s2t-wav2vec2-large-en-de""": 10_24}
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase=False , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , **_lowerCAmelCase , )
UpperCAmelCase__ : Optional[Any] = do_lower_case
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase__ : List[Any] = json.load(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Union[str, Any] = None
else:
with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase__ : Tuple = merges_handle.read().split("""\n""" )[:-1]
UpperCAmelCase__ : Any = [tuple(merge.split()[:2] ) for merge in merges]
UpperCAmelCase__ : Optional[Any] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
UpperCAmelCase__ : List[str] = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        # append the word-final marker to the last symbol before merging
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the highest-ranked (lowest index) bigram first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n  " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so that it can only "
                "be used for decoding, not for encoding. Make sure to provide a `merges.txt` file "
                "at instantiation to enable encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a list of output tokens into a single string."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
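# Usage sketch (not part of the original module; illustrative only). Assumes
# `vocab.json` and `merges.txt` files in the Speech2Text2 format exist in the
# working directory; both file names here are placeholders, and the module's
# relative imports must resolve (i.e. run inside the transformers package).
if __name__ == "__main__":
    tokenizer = Speech2Text2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")
    tokens = tokenizer.tokenize("hello world")  # BPE pieces, e.g. ["hel@@", "lo", "world"]
    print(tokenizer.convert_tokens_to_ids(tokens))
    print(tokenizer.convert_tokens_to_string(tokens))  # reassembles "@@ "-split pieces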
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
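# Usage sketch (not part of the original module; illustrative only). The zero
# arrays stand in for real noisy latents and a denoising model's prediction,
# and the module's relative imports must resolve (i.e. run inside diffusers).
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=50)

    sample = jnp.zeros((1, 3, 32, 32), dtype=scheduler.dtype)  # dummy noisy sample
    model_output = jnp.zeros_like(sample)  # stand-in for the model's noise prediction

    t = state.timesteps[0]
    out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))
    print(out.prev_sample.shape)  # (1, 3, 32, 32)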